2024-12-16 21:00:14,689 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-16 21:00:14,741 main DEBUG Took 0.048348 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-16 21:00:14,741 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-16 21:00:14,742 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-16 21:00:14,743 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-16 21:00:14,746 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,762 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-16 21:00:14,782 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,784 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,785 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,785 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,786 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,786 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,787 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,787 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,788 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,788 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,789 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,789 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,790 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,790 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-16 21:00:14,791 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,791 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,792 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,792 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,793 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,793 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,794 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,794 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,795 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,795 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-16 21:00:14,796 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,796 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-16 21:00:14,798 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-16 21:00:14,800 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-16 21:00:14,802 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-16 21:00:14,803 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-16 21:00:14,804 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-16 21:00:14,805 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-16 21:00:14,815 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-16 21:00:14,818 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-16 21:00:14,820 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-16 21:00:14,821 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-16 21:00:14,821 main DEBUG createAppenders(={Console}) 2024-12-16 21:00:14,822 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 initialized 2024-12-16 21:00:14,822 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-16 21:00:14,823 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 OK. 2024-12-16 21:00:14,823 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-16 21:00:14,824 main DEBUG OutputStream closed 2024-12-16 21:00:14,824 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-16 21:00:14,824 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-16 21:00:14,824 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@57cf54e1 OK 2024-12-16 21:00:14,932 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-16 21:00:14,939 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-16 21:00:14,941 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-16 21:00:14,942 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-16 21:00:14,945 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-16 21:00:14,950 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-16 21:00:14,950 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-16 21:00:14,954 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-16 21:00:14,955 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-16 21:00:14,955 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-16 21:00:14,956 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-16 21:00:14,956 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-16 21:00:14,956 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-16 21:00:14,957 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-16 21:00:14,957 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-16 21:00:14,958 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-16 21:00:14,959 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-16 21:00:14,960 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-16 21:00:14,963 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-16 21:00:14,964 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@29ca3d04) with optional ClassLoader: null 2024-12-16 21:00:14,964 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-16 21:00:14,965 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@29ca3d04] started OK. 2024-12-16T21:00:14,980 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-16 21:00:14,984 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-16 21:00:14,985 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-16T21:00:15,371 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17 2024-12-16T21:00:15,372 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-12-16T21:00:15,408 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-16T21:00:15,622 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-16T21:00:15,626 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72, deleteOnExit=true 2024-12-16T21:00:15,626 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-16T21:00:15,627 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/test.cache.data in system properties and HBase conf 2024-12-16T21:00:15,628 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.tmp.dir in system properties and HBase conf 2024-12-16T21:00:15,629 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir in system properties and HBase conf 2024-12-16T21:00:15,630 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-16T21:00:15,630 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-16T21:00:15,631 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-16T21:00:15,739 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-16T21:00:15,744 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-16T21:00:15,745 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-16T21:00:15,746 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-16T21:00:15,746 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-16T21:00:15,747 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-16T21:00:15,748 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-16T21:00:15,748 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-16T21:00:15,749 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-16T21:00:15,749 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-16T21:00:15,750 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/nfs.dump.dir in system properties and HBase conf 2024-12-16T21:00:15,751 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/java.io.tmpdir in system properties and HBase conf 2024-12-16T21:00:15,751 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-16T21:00:15,752 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-16T21:00:15,752 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-16T21:00:16,774 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-16T21:00:16,893 INFO [Time-limited test {}] log.Log(170): Logging initialized @3378ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-16T21:00:17,002 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:17,094 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T21:00:17,124 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T21:00:17,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T21:00:17,126 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-16T21:00:17,143 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:17,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a82d853{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,AVAILABLE} 2024-12-16T21:00:17,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@343317a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T21:00:17,421 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7883a2cb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/java.io.tmpdir/jetty-localhost-44909-hadoop-hdfs-3_4_1-tests_jar-_-any-9983885827212575100/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-16T21:00:17,432 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44909} 2024-12-16T21:00:17,433 INFO [Time-limited test {}] server.Server(415): Started @3919ms 2024-12-16T21:00:17,823 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:17,829 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T21:00:17,834 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T21:00:17,834 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T21:00:17,835 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-16T21:00:17,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7622634b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,AVAILABLE} 2024-12-16T21:00:17,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d5648e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T21:00:17,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a633356{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/java.io.tmpdir/jetty-localhost-43589-hadoop-hdfs-3_4_1-tests_jar-_-any-8274433368191356890/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T21:00:17,965 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:43589} 2024-12-16T21:00:17,965 INFO [Time-limited test {}] server.Server(415): Started @4452ms 2024-12-16T21:00:18,025 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-16T21:00:18,186 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:18,198 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T21:00:18,218 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T21:00:18,219 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T21:00:18,219 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-16T21:00:18,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45f72ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,AVAILABLE} 2024-12-16T21:00:18,223 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a110049{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T21:00:18,392 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14090edc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/java.io.tmpdir/jetty-localhost-46059-hadoop-hdfs-3_4_1-tests_jar-_-any-11480721877170533488/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T21:00:18,393 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:46059} 2024-12-16T21:00:18,393 INFO [Time-limited test {}] server.Server(415): Started @4880ms 2024-12-16T21:00:18,397 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-16T21:00:18,520 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:18,526 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T21:00:18,536 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T21:00:18,536 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T21:00:18,537 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-16T21:00:18,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e1b48b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,AVAILABLE} 2024-12-16T21:00:18,539 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f4d5ab4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T21:00:18,607 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1/current/BP-1970874171-172.17.0.2-1734382816445/current, will proceed with Du for space computation calculation, 2024-12-16T21:00:18,608 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3/current/BP-1970874171-172.17.0.2-1734382816445/current, will proceed with Du for space computation calculation, 2024-12-16T21:00:18,614 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2/current/BP-1970874171-172.17.0.2-1734382816445/current, will proceed with Du for space computation calculation, 2024-12-16T21:00:18,618 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4/current/BP-1970874171-172.17.0.2-1734382816445/current, will proceed with Du for space computation calculation, 2024-12-16T21:00:18,701 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6bdef31c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/java.io.tmpdir/jetty-localhost-40415-hadoop-hdfs-3_4_1-tests_jar-_-any-12840436696309356674/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T21:00:18,703 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:40415} 2024-12-16T21:00:18,703 INFO [Time-limited test {}] server.Server(415): Started @5190ms 2024-12-16T21:00:18,705 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-16T21:00:18,744 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-16T21:00:18,745 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-16T21:00:18,832 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdfd8ca3240c94fd9 with lease ID 0x7f65634273383e25: Processing first storage report for DS-cad7b0df-8300-46e6-8020-3278417c175b from datanode DatanodeRegistration(127.0.0.1:39445, datanodeUuid=a092c635-3b62-48e5-b8a3-2cb297fa0a07, infoPort=43731, infoSecurePort=0, ipcPort=44195, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445) 2024-12-16T21:00:18,834 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdfd8ca3240c94fd9 with lease ID 0x7f65634273383e25: from storage DS-cad7b0df-8300-46e6-8020-3278417c175b node DatanodeRegistration(127.0.0.1:39445, datanodeUuid=a092c635-3b62-48e5-b8a3-2cb297fa0a07, infoPort=43731, infoSecurePort=0, ipcPort=44195, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-16T21:00:18,834 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x91e9b19d4f77ad87 with lease ID 0x7f65634273383e24: Processing first storage report for DS-0cf8bf6a-38e2-4ed2-80f6-0482d7a64cda from datanode DatanodeRegistration(127.0.0.1:44571, datanodeUuid=ea823c6c-168e-4f2e-9a2f-daa6b4a4e26f, infoPort=39437, infoSecurePort=0, ipcPort=37339, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445) 2024-12-16T21:00:18,835 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91e9b19d4f77ad87 with lease ID 0x7f65634273383e24: from storage DS-0cf8bf6a-38e2-4ed2-80f6-0482d7a64cda node DatanodeRegistration(127.0.0.1:44571, datanodeUuid=ea823c6c-168e-4f2e-9a2f-daa6b4a4e26f, infoPort=39437, infoSecurePort=0, ipcPort=37339, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-16T21:00:18,835 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdfd8ca3240c94fd9 with lease ID 0x7f65634273383e25: Processing first storage report for DS-74f895d6-af6b-4510-9fd9-e2d7e34a6ac4 from datanode DatanodeRegistration(127.0.0.1:39445, datanodeUuid=a092c635-3b62-48e5-b8a3-2cb297fa0a07, infoPort=43731, infoSecurePort=0, ipcPort=44195, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445) 2024-12-16T21:00:18,835 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdfd8ca3240c94fd9 with lease ID 0x7f65634273383e25: from storage DS-74f895d6-af6b-4510-9fd9-e2d7e34a6ac4 node DatanodeRegistration(127.0.0.1:39445, datanodeUuid=a092c635-3b62-48e5-b8a3-2cb297fa0a07, infoPort=43731, infoSecurePort=0, ipcPort=44195, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445), blocks: 
0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-16T21:00:18,836 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x91e9b19d4f77ad87 with lease ID 0x7f65634273383e24: Processing first storage report for DS-68618a8c-aaec-4b2a-8c41-ad6ec512af7d from datanode DatanodeRegistration(127.0.0.1:44571, datanodeUuid=ea823c6c-168e-4f2e-9a2f-daa6b4a4e26f, infoPort=39437, infoSecurePort=0, ipcPort=37339, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445) 2024-12-16T21:00:18,836 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91e9b19d4f77ad87 with lease ID 0x7f65634273383e24: from storage DS-68618a8c-aaec-4b2a-8c41-ad6ec512af7d node DatanodeRegistration(127.0.0.1:44571, datanodeUuid=ea823c6c-168e-4f2e-9a2f-daa6b4a4e26f, infoPort=39437, infoSecurePort=0, ipcPort=37339, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-16T21:00:18,961 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5/current/BP-1970874171-172.17.0.2-1734382816445/current, will proceed with Du for space computation calculation, 2024-12-16T21:00:18,961 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6/current/BP-1970874171-172.17.0.2-1734382816445/current, will proceed with Du for space computation calculation, 2024-12-16T21:00:19,022 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-16T21:00:19,052 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf1c469c943a3a8a with lease ID 0x7f65634273383e26: Processing first storage report for DS-d9807c4d-d3e1-401e-be38-1a140a301b09 from datanode DatanodeRegistration(127.0.0.1:33147, datanodeUuid=25a9c4e7-cb6f-40b7-aff9-f0b9825d31c0, infoPort=44331, infoSecurePort=0, ipcPort=38085, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445) 2024-12-16T21:00:19,053 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf1c469c943a3a8a with lease ID 0x7f65634273383e26: from storage DS-d9807c4d-d3e1-401e-be38-1a140a301b09 node DatanodeRegistration(127.0.0.1:33147, datanodeUuid=25a9c4e7-cb6f-40b7-aff9-f0b9825d31c0, infoPort=44331, infoSecurePort=0, ipcPort=38085, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-16T21:00:19,053 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf1c469c943a3a8a with lease ID 0x7f65634273383e26: Processing first storage report for DS-bde33f80-14bb-45eb-b34c-0bfc73b29a6f from datanode DatanodeRegistration(127.0.0.1:33147, datanodeUuid=25a9c4e7-cb6f-40b7-aff9-f0b9825d31c0, infoPort=44331, infoSecurePort=0, ipcPort=38085, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445) 2024-12-16T21:00:19,054 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf1c469c943a3a8a with lease ID 0x7f65634273383e26: from storage DS-bde33f80-14bb-45eb-b34c-0bfc73b29a6f node DatanodeRegistration(127.0.0.1:33147, datanodeUuid=25a9c4e7-cb6f-40b7-aff9-f0b9825d31c0, infoPort=44331, infoSecurePort=0, ipcPort=38085, storageInfo=lv=-57;cid=testClusterID;nsid=495639440;c=1734382816445), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-16T21:00:19,286 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17 2024-12-16T21:00:19,401 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/zookeeper_0, clientPort=61742, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-16T21:00:19,428 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=61742 2024-12-16T21:00:19,448 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
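
For context, a minimal sketch (assuming the stock branch-2 HBaseTestingUtility / StartMiniClusterOption API; class and variable names here are illustrative, not taken from this run) of how a test such as TestExportSnapshot would request the cluster shape logged above — one master, three region servers, three data nodes, and one ZooKeeper server:

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    // Owns the temporary test-data directory and the test HBase configuration.
    HBaseTestingUtility util = new HBaseTestingUtility();

    // Mirrors the StartMiniClusterOption values printed by HBaseTestingUtility(1126) above:
    // numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();

    // Brings up the mini DFS, mini ZooKeeper and the HBase master/region servers,
    // which is what produces the DFS, Jetty and ZooKeeper startup entries in this log.
    util.startMiniCluster(option);
    try {
      // ... test body would run against util.getConnection() here ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```
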
2024-12-16T21:00:19,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T21:00:19,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741825_1001 (size=7) 2024-12-16T21:00:19,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741825_1001 (size=7) 2024-12-16T21:00:19,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741825_1001 (size=7) 2024-12-16T21:00:20,235 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba with version=8 2024-12-16T21:00:20,235 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/hbase-staging 2024-12-16T21:00:20,362 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-16T21:00:20,621 INFO [Time-limited test {}] client.ConnectionUtils(129): master/6b259cf15929:0 server-side Connection retries=45 2024-12-16T21:00:20,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:20,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:20,639 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-16T21:00:20,640 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:20,640 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-16T21:00:20,811 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-16T21:00:20,875 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-16T21:00:20,883 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-16T21:00:20,887 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-16T21:00:20,912 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 18135 (auto-detected) 2024-12-16T21:00:20,913 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 
02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-16T21:00:20,933 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34343 2024-12-16T21:00:20,943 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T21:00:20,946 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T21:00:20,958 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:34343 connecting to ZooKeeper ensemble=127.0.0.1:61742 2024-12-16T21:00:20,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:343430x0, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-16T21:00:20,999 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34343-0x1000b4e97660000 connected 2024-12-16T21:00:21,037 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T21:00:21,040 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T21:00:21,058 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-16T21:00:21,063 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34343 2024-12-16T21:00:21,063 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34343 2024-12-16T21:00:21,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34343 2024-12-16T21:00:21,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34343 2024-12-16T21:00:21,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34343 2024-12-16T21:00:21,071 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba, hbase.cluster.distributed=false 2024-12-16T21:00:21,141 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/6b259cf15929:0 server-side Connection retries=45 2024-12-16T21:00:21,141 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:21,141 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:21,142 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 
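
As a rough illustration (a sketch using the ordinary HBase client API, not code specific to this run), once the master above has registered itself in ZooKeeper at 127.0.0.1:61742, test code reaches it through a normal client connection built from the same configuration; in a real test the configuration would come from HBaseTestingUtility#getConfiguration() rather than being set by hand:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    // The mini cluster's ZooKeeper quorum and client port, matching the
    // "connecting to ZooKeeper ensemble=127.0.0.1:61742" entries above.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 61742);

    // The client locates the active master and region servers through the
    // /hbase znodes whose watchers are being set in the surrounding log entries.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
```
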
2024-12-16T21:00:21,142 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:21,142 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-16T21:00:21,145 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-16T21:00:21,148 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-16T21:00:21,149 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42323 2024-12-16T21:00:21,152 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-16T21:00:21,161 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-16T21:00:21,163 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T21:00:21,166 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T21:00:21,171 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42323 connecting to ZooKeeper ensemble=127.0.0.1:61742 2024-12-16T21:00:21,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423230x0, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-16T21:00:21,175 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:423230x0, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T21:00:21,178 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:423230x0, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T21:00:21,178 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42323-0x1000b4e97660001 connected 2024-12-16T21:00:21,179 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-16T21:00:21,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42323 2024-12-16T21:00:21,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42323 2024-12-16T21:00:21,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42323 2024-12-16T21:00:21,187 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42323 2024-12-16T21:00:21,190 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42323 2024-12-16T21:00:21,217 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/6b259cf15929:0 server-side Connection retries=45 2024-12-16T21:00:21,218 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:21,218 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:21,219 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-16T21:00:21,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:21,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-16T21:00:21,219 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-16T21:00:21,220 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-16T21:00:21,222 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36861 2024-12-16T21:00:21,223 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-16T21:00:21,230 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-16T21:00:21,231 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T21:00:21,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T21:00:21,238 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36861 connecting to ZooKeeper ensemble=127.0.0.1:61742 2024-12-16T21:00:21,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:368610x0, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-16T21:00:21,242 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:368610x0, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T21:00:21,244 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:368610x0, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T21:00:21,245 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:368610x0, quorum=127.0.0.1:61742, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/acl 2024-12-16T21:00:21,246 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36861-0x1000b4e97660002 connected 2024-12-16T21:00:21,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36861 2024-12-16T21:00:21,250 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36861 2024-12-16T21:00:21,253 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36861 2024-12-16T21:00:21,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36861 2024-12-16T21:00:21,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36861 2024-12-16T21:00:21,275 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/6b259cf15929:0 server-side Connection retries=45 2024-12-16T21:00:21,275 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:21,275 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:21,275 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-16T21:00:21,275 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T21:00:21,276 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-16T21:00:21,276 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-16T21:00:21,276 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-16T21:00:21,278 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41713 2024-12-16T21:00:21,279 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-16T21:00:21,280 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-16T21:00:21,282 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T21:00:21,284 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T21:00:21,287 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41713 connecting to ZooKeeper ensemble=127.0.0.1:61742 2024-12-16T21:00:21,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:417130x0, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-16T21:00:21,294 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:417130x0, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T21:00:21,302 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:417130x0, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T21:00:21,302 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41713-0x1000b4e97660003 connected 2024-12-16T21:00:21,305 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-16T21:00:21,308 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41713 2024-12-16T21:00:21,308 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41713 2024-12-16T21:00:21,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41713 2024-12-16T21:00:21,312 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41713 2024-12-16T21:00:21,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41713 2024-12-16T21:00:21,319 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/6b259cf15929,34343,1734382820356 2024-12-16T21:00:21,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T21:00:21,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T21:00:21,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T21:00:21,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T21:00:21,345 DEBUG [M:0;6b259cf15929:34343 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6b259cf15929:34343 2024-12-16T21:00:21,345 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on existing 
znode=/hbase/backup-masters/6b259cf15929,34343,1734382820356 2024-12-16T21:00:21,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T21:00:21,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T21:00:21,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:21,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T21:00:21,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T21:00:21,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:21,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:21,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:21,378 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-16T21:00:21,379 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-16T21:00:21,380 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6b259cf15929,34343,1734382820356 from backup master directory 2024-12-16T21:00:21,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T21:00:21,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T21:00:21,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6b259cf15929,34343,1734382820356 
2024-12-16T21:00:21,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T21:00:21,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T21:00:21,385 WARN [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-16T21:00:21,385 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6b259cf15929,34343,1734382820356 2024-12-16T21:00:21,391 INFO [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-16T21:00:21,393 INFO [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-16T21:00:21,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741826_1002 (size=42) 2024-12-16T21:00:21,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741826_1002 (size=42) 2024-12-16T21:00:21,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741826_1002 (size=42) 2024-12-16T21:00:21,516 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/hbase.id with ID: 232b9e51-26ba-4170-a627-bae45c1397ca 2024-12-16T21:00:21,570 INFO [master/6b259cf15929:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T21:00:21,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:21,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:21,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:21,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:21,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741827_1003 (size=196) 2024-12-16T21:00:21,634 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741827_1003 (size=196) 2024-12-16T21:00:21,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741827_1003 (size=196) 2024-12-16T21:00:22,052 INFO [master/6b259cf15929:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:00:22,054 INFO [master/6b259cf15929:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-16T21:00:22,070 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] 
at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T21:00:22,075 INFO [master/6b259cf15929:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-16T21:00:22,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741828_1004 (size=1189) 2024-12-16T21:00:22,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741828_1004 (size=1189) 2024-12-16T21:00:22,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741828_1004 (size=1189) 2024-12-16T21:00:22,141 INFO [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/data/master/store 2024-12-16T21:00:22,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 
is added to blk_1073741829_1005 (size=34) 2024-12-16T21:00:22,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741829_1005 (size=34) 2024-12-16T21:00:22,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741829_1005 (size=34) 2024-12-16T21:00:22,175 INFO [master/6b259cf15929:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-16T21:00:22,176 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:22,177 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-16T21:00:22,177 INFO [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T21:00:22,178 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T21:00:22,178 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-16T21:00:22,178 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T21:00:22,178 INFO [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-16T21:00:22,178 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-16T21:00:22,180 WARN [master/6b259cf15929:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/data/master/store/.initializing 2024-12-16T21:00:22,181 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/WALs/6b259cf15929,34343,1734382820356 2024-12-16T21:00:22,190 INFO [master/6b259cf15929:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-16T21:00:22,209 INFO [master/6b259cf15929:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6b259cf15929%2C34343%2C1734382820356, suffix=, logDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/WALs/6b259cf15929,34343,1734382820356, archiveDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/oldWALs, maxLogs=10 2024-12-16T21:00:22,243 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/WALs/6b259cf15929,34343,1734382820356/6b259cf15929%2C34343%2C1734382820356.1734382822217, exclude list is [], retry=0 2024-12-16T21:00:22,269 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33147,DS-d9807c4d-d3e1-401e-be38-1a140a301b09,DISK] 2024-12-16T21:00:22,269 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44571,DS-0cf8bf6a-38e2-4ed2-80f6-0482d7a64cda,DISK] 2024-12-16T21:00:22,270 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39445,DS-cad7b0df-8300-46e6-8020-3278417c175b,DISK] 2024-12-16T21:00:22,274 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-16T21:00:22,340 INFO [master/6b259cf15929:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/WALs/6b259cf15929,34343,1734382820356/6b259cf15929%2C34343%2C1734382820356.1734382822217 2024-12-16T21:00:22,355 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43731:43731),(127.0.0.1/127.0.0.1:39437:39437),(127.0.0.1/127.0.0.1:44331:44331)] 2024-12-16T21:00:22,356 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-16T21:00:22,357 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:22,361 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-16T21:00:22,364 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-16T21:00:22,415 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-16T21:00:22,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-16T21:00:22,458 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:22,461 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T21:00:22,462 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-16T21:00:22,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-16T21:00:22,468 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:22,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:00:22,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-16T21:00:22,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-16T21:00:22,472 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:22,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:00:22,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-16T21:00:22,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-16T21:00:22,481 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:22,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:00:22,486 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-16T21:00:22,488 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-16T21:00:22,499 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-16T21:00:22,505 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-16T21:00:22,511 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:00:22,512 INFO [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74191410, jitterRate=0.10553815960884094}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-16T21:00:22,518 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-16T21:00:22,520 INFO [master/6b259cf15929:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-16T21:00:22,570 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2af142cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:22,609 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-16T21:00:22,629 INFO [master/6b259cf15929:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-16T21:00:22,629 INFO [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-16T21:00:22,632 INFO [master/6b259cf15929:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-16T21:00:22,635 INFO [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 3 msec 2024-12-16T21:00:22,642 INFO [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 6 msec 2024-12-16T21:00:22,643 INFO [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-16T21:00:22,684 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-16T21:00:22,697 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-16T21:00:22,700 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-16T21:00:22,702 INFO [master/6b259cf15929:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-16T21:00:22,703 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-16T21:00:22,705 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-16T21:00:22,707 INFO [master/6b259cf15929:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-16T21:00:22,710 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-16T21:00:22,711 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-16T21:00:22,712 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-16T21:00:22,713 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-16T21:00:22,722 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-16T21:00:22,724 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-16T21:00:22,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-16T21:00:22,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-16T21:00:22,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-16T21:00:22,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-16T21:00:22,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,729 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=6b259cf15929,34343,1734382820356, sessionid=0x1000b4e97660000, setting cluster-up flag (Was=false) 2024-12-16T21:00:22,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,746 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-16T21:00:22,747 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6b259cf15929,34343,1734382820356 2024-12-16T21:00:22,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:22,757 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-16T21:00:22,759 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6b259cf15929,34343,1734382820356 2024-12-16T21:00:22,842 DEBUG [RS:0;6b259cf15929:42323 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6b259cf15929:42323 2024-12-16T21:00:22,844 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1008): ClusterId : 232b9e51-26ba-4170-a627-bae45c1397ca 2024-12-16T21:00:22,848 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-16T21:00:22,853 DEBUG [RS:1;6b259cf15929:36861 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;6b259cf15929:36861 2024-12-16T21:00:22,853 DEBUG [RS:2;6b259cf15929:41713 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;6b259cf15929:41713 2024-12-16T21:00:22,853 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-16T21:00:22,854 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-16T21:00:22,857 INFO [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(1008): ClusterId : 232b9e51-26ba-4170-a627-bae45c1397ca 2024-12-16T21:00:22,857 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-16T21:00:22,857 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1008): ClusterId : 232b9e51-26ba-4170-a627-bae45c1397ca 2024-12-16T21:00:22,857 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-16T21:00:22,860 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-16T21:00:22,860 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-16T21:00:22,860 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-16T21:00:22,861 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-16T21:00:22,861 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-16T21:00:22,861 DEBUG [RS:0;6b259cf15929:42323 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b649956, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:22,863 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-16T21:00:22,864 DEBUG [RS:1;6b259cf15929:36861 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6268bbb1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:22,864 DEBUG [RS:0;6b259cf15929:42323 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15bb33ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6b259cf15929/172.17.0.2:0 2024-12-16T21:00:22,864 DEBUG [RS:1;6b259cf15929:36861 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c053736, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6b259cf15929/172.17.0.2:0 2024-12-16T21:00:22,869 INFO [RS:1;6b259cf15929:36861 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-16T21:00:22,869 INFO [RS:1;6b259cf15929:36861 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-16T21:00:22,869 INFO [RS:0;6b259cf15929:42323 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-16T21:00:22,869 INFO [RS:0;6b259cf15929:42323 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-16T21:00:22,869 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] master.HMaster(3390): Registered master coprocessor service: service=AccessControlService 2024-12-16T21:00:22,870 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-16T21:00:22,871 DEBUG [RS:2;6b259cf15929:41713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@391ef5c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:22,875 DEBUG [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-16T21:00:22,875 DEBUG [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-16T21:00:22,876 INFO [RS:1;6b259cf15929:36861 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:00:22,876 DEBUG [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-16T21:00:22,876 INFO [master/6b259cf15929:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:00:22,876 INFO [RS:0;6b259cf15929:42323 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:00:22,876 DEBUG [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-16T21:00:22,874 DEBUG [RS:2;6b259cf15929:41713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55bedf99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6b259cf15929/172.17.0.2:0 2024-12-16T21:00:22,876 INFO [master/6b259cf15929:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-12-16T21:00:22,876 INFO [RS:2;6b259cf15929:41713 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-16T21:00:22,877 INFO [RS:2;6b259cf15929:41713 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-16T21:00:22,878 INFO [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(3073): reportForDuty to master=6b259cf15929,34343,1734382820356 with isa=6b259cf15929/172.17.0.2:36861, startcode=1734382821216 2024-12-16T21:00:22,878 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(3073): reportForDuty to master=6b259cf15929,34343,1734382820356 with isa=6b259cf15929/172.17.0.2:42323, startcode=1734382821139 2024-12-16T21:00:22,880 DEBUG [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-16T21:00:22,880 INFO [RS:2;6b259cf15929:41713 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:00:22,880 DEBUG [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-16T21:00:22,881 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(3073): reportForDuty to master=6b259cf15929,34343,1734382820356 with isa=6b259cf15929/172.17.0.2:41713, startcode=1734382821274 2024-12-16T21:00:22,895 DEBUG [RS:0;6b259cf15929:42323 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T21:00:22,895 DEBUG [RS:2;6b259cf15929:41713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T21:00:22,895 DEBUG [RS:1;6b259cf15929:36861 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T21:00:22,943 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-16T21:00:22,944 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37547, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T21:00:22,944 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46929, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T21:00:22,944 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55741, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T21:00:22,954 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T21:00:22,961 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T21:00:22,962 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T21:00:22,963 INFO [master/6b259cf15929:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-16T21:00:22,969 INFO [master/6b259cf15929:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-16T21:00:22,977 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6b259cf15929,34343,1734382820356 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-16T21:00:22,982 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6b259cf15929:0, corePoolSize=5, maxPoolSize=5 2024-12-16T21:00:22,982 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6b259cf15929:0, corePoolSize=5, maxPoolSize=5 2024-12-16T21:00:22,982 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6b259cf15929:0, corePoolSize=5, maxPoolSize=5 2024-12-16T21:00:22,982 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6b259cf15929:0, corePoolSize=5, maxPoolSize=5 2024-12-16T21:00:22,983 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6b259cf15929:0, corePoolSize=10, maxPoolSize=10 2024-12-16T21:00:22,983 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:22,983 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6b259cf15929:0, corePoolSize=2, maxPoolSize=2 2024-12-16T21:00:22,983 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:22,996 DEBUG [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-16T21:00:22,996 DEBUG [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-16T21:00:22,996 DEBUG [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-16T21:00:22,996 WARN [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-16T21:00:22,996 WARN [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-16T21:00:22,996 WARN [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-16T21:00:23,006 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-16T21:00:23,007 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-16T21:00:23,010 INFO [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734382853009 2024-12-16T21:00:23,012 INFO [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-16T21:00:23,012 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:23,013 INFO [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-16T21:00:23,013 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-16T21:00:23,017 INFO [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-16T21:00:23,018 INFO [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-16T21:00:23,018 INFO [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-16T21:00:23,019 INFO [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-16T21:00:23,019 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-16T21:00:23,026 INFO [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-16T21:00:23,027 INFO [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-16T21:00:23,028 INFO [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-16T21:00:23,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741831_1007 (size=1039) 2024-12-16T21:00:23,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741831_1007 (size=1039) 2024-12-16T21:00:23,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741831_1007 (size=1039) 2024-12-16T21:00:23,040 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-16T21:00:23,040 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:23,042 INFO [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-16T21:00:23,043 INFO [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-16T21:00:23,046 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6b259cf15929:0:becomeActiveMaster-HFileCleaner.large.0-1734382823045,5,FailOnTimeoutGroup] 2024-12-16T21:00:23,050 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/6b259cf15929:0:becomeActiveMaster-HFileCleaner.small.0-1734382823046,5,FailOnTimeoutGroup] 2024-12-16T21:00:23,050 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,050 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-16T21:00:23,052 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,053 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741832_1008 (size=32) 2024-12-16T21:00:23,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741832_1008 (size=32) 2024-12-16T21:00:23,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741832_1008 (size=32) 2024-12-16T21:00:23,074 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:23,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-16T21:00:23,080 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-16T21:00:23,080 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:23,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T21:00:23,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier 
of region 1588230740 2024-12-16T21:00:23,085 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-16T21:00:23,085 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:23,086 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T21:00:23,086 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-16T21:00:23,089 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-16T21:00:23,089 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:23,090 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T21:00:23,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740 2024-12-16T21:00:23,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740 2024-12-16T21:00:23,095 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-16T21:00:23,098 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-16T21:00:23,098 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(3073): reportForDuty to master=6b259cf15929,34343,1734382820356 with isa=6b259cf15929/172.17.0.2:42323, startcode=1734382821139 2024-12-16T21:00:23,098 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(3073): reportForDuty to master=6b259cf15929,34343,1734382820356 with isa=6b259cf15929/172.17.0.2:41713, startcode=1734382821274 2024-12-16T21:00:23,098 INFO [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(3073): reportForDuty to master=6b259cf15929,34343,1734382820356 with isa=6b259cf15929/172.17.0.2:36861, startcode=1734382821216 2024-12-16T21:00:23,100 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 6b259cf15929,42323,1734382821139 2024-12-16T21:00:23,103 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:00:23,103 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343 {}] master.ServerManager(486): Registering regionserver=6b259cf15929,42323,1734382821139 2024-12-16T21:00:23,104 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68814795, jitterRate=0.025420352816581726}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-16T21:00:23,113 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-16T21:00:23,113 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-16T21:00:23,113 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-16T21:00:23,113 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-16T21:00:23,113 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-16T21:00:23,113 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-16T21:00:23,114 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 6b259cf15929,36861,1734382821216 2024-12-16T21:00:23,114 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343 {}] master.ServerManager(486): Registering regionserver=6b259cf15929,36861,1734382821216 2024-12-16T21:00:23,114 DEBUG [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:23,114 DEBUG [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:37263 2024-12-16T21:00:23,114 DEBUG [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-16T21:00:23,117 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-16T21:00:23,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close 
journal for 1588230740: 2024-12-16T21:00:23,119 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 6b259cf15929,41713,1734382821274 2024-12-16T21:00:23,119 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343 {}] master.ServerManager(486): Registering regionserver=6b259cf15929,41713,1734382821274 2024-12-16T21:00:23,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-16T21:00:23,122 DEBUG [RS:0;6b259cf15929:42323 {}] zookeeper.ZKUtil(111): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6b259cf15929,42323,1734382821139 2024-12-16T21:00:23,122 WARN [RS:0;6b259cf15929:42323 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-16T21:00:23,122 DEBUG [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:23,122 INFO [RS:0;6b259cf15929:42323 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-16T21:00:23,122 DEBUG [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:37263 2024-12-16T21:00:23,122 DEBUG [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-16T21:00:23,122 DEBUG [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,42323,1734382821139 2024-12-16T21:00:23,123 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-16T21:00:23,124 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-16T21:00:23,125 DEBUG [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:23,125 DEBUG [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:37263 2024-12-16T21:00:23,125 DEBUG [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-16T21:00:23,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-16T21:00:23,128 DEBUG [RS:1;6b259cf15929:36861 {}] zookeeper.ZKUtil(111): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6b259cf15929,36861,1734382821216 2024-12-16T21:00:23,128 WARN [RS:1;6b259cf15929:36861 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-16T21:00:23,128 INFO [RS:1;6b259cf15929:36861 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-16T21:00:23,128 DEBUG [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,36861,1734382821216 2024-12-16T21:00:23,130 DEBUG [RS:2;6b259cf15929:41713 {}] zookeeper.ZKUtil(111): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6b259cf15929,41713,1734382821274 2024-12-16T21:00:23,130 WARN [RS:2;6b259cf15929:41713 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-16T21:00:23,130 INFO [RS:2;6b259cf15929:41713 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-16T21:00:23,130 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6b259cf15929,42323,1734382821139] 2024-12-16T21:00:23,130 DEBUG [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,41713,1734382821274 2024-12-16T21:00:23,131 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6b259cf15929,41713,1734382821274] 2024-12-16T21:00:23,131 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6b259cf15929,36861,1734382821216] 2024-12-16T21:00:23,133 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-16T21:00:23,145 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-16T21:00:23,151 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-16T21:00:23,174 DEBUG [RS:1;6b259cf15929:36861 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-16T21:00:23,189 DEBUG [RS:0;6b259cf15929:42323 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-16T21:00:23,190 INFO [RS:0;6b259cf15929:42323 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-16T21:00:23,193 INFO [RS:1;6b259cf15929:36861 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-16T21:00:23,206 DEBUG [RS:2;6b259cf15929:41713 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-16T21:00:23,206 INFO [RS:2;6b259cf15929:41713 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-16T21:00:23,209 INFO [RS:0;6b259cf15929:42323 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, 
globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-16T21:00:23,209 INFO [RS:1;6b259cf15929:36861 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-16T21:00:23,209 INFO [RS:2;6b259cf15929:41713 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-16T21:00:23,217 INFO [RS:0;6b259cf15929:42323 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-16T21:00:23,217 INFO [RS:0;6b259cf15929:42323 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,218 INFO [RS:1;6b259cf15929:36861 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-16T21:00:23,218 INFO [RS:1;6b259cf15929:36861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,218 INFO [RS:2;6b259cf15929:41713 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-16T21:00:23,218 INFO [RS:2;6b259cf15929:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,218 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-16T21:00:23,220 INFO [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-16T21:00:23,223 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-16T21:00:23,226 INFO [RS:2;6b259cf15929:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,226 INFO [RS:1;6b259cf15929:36861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,226 INFO [RS:0;6b259cf15929:42323 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-16T21:00:23,227 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,227 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6b259cf15929:0, corePoolSize=2, maxPoolSize=2 2024-12-16T21:00:23,227 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6b259cf15929:0, corePoolSize=2, maxPoolSize=2 2024-12-16T21:00:23,228 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,228 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,228 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6b259cf15929:0, corePoolSize=1, 
maxPoolSize=1 2024-12-16T21:00:23,228 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,228 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,228 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,228 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,228 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,228 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,228 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,228 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0, corePoolSize=3, maxPoolSize=3 2024-12-16T21:00:23,228 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0, corePoolSize=3, maxPoolSize=3 2024-12-16T21:00:23,229 DEBUG [RS:1;6b259cf15929:36861 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6b259cf15929:0, corePoolSize=3, maxPoolSize=3 2024-12-16T21:00:23,229 DEBUG [RS:2;6b259cf15929:41713 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6b259cf15929:0, corePoolSize=3, maxPoolSize=3 2024-12-16T21:00:23,227 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,238 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,238 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6b259cf15929:0, corePoolSize=2, maxPoolSize=2 2024-12-16T21:00:23,238 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,238 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,238 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,238 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,238 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6b259cf15929:0, corePoolSize=1, maxPoolSize=1 2024-12-16T21:00:23,239 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0, corePoolSize=3, maxPoolSize=3 2024-12-16T21:00:23,239 DEBUG [RS:0;6b259cf15929:42323 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6b259cf15929:0, corePoolSize=3, maxPoolSize=3 2024-12-16T21:00:23,240 INFO [RS:2;6b259cf15929:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,240 INFO [RS:2;6b259cf15929:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,240 INFO [RS:2;6b259cf15929:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,240 INFO [RS:2;6b259cf15929:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,240 INFO [RS:2;6b259cf15929:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,41713,1734382821274-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-16T21:00:23,242 INFO [RS:1;6b259cf15929:36861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,242 INFO [RS:1;6b259cf15929:36861 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,242 INFO [RS:1;6b259cf15929:36861 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,242 INFO [RS:1;6b259cf15929:36861 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,242 INFO [RS:1;6b259cf15929:36861 {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,36861,1734382821216-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-16T21:00:23,258 INFO [RS:0;6b259cf15929:42323 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,258 INFO [RS:0;6b259cf15929:42323 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,258 INFO [RS:0;6b259cf15929:42323 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,258 INFO [RS:0;6b259cf15929:42323 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,258 INFO [RS:0;6b259cf15929:42323 {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,42323,1734382821139-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-16T21:00:23,276 INFO [RS:1;6b259cf15929:36861 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-16T21:00:23,277 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-16T21:00:23,279 INFO [RS:1;6b259cf15929:36861 {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,36861,1734382821216-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,279 INFO [RS:2;6b259cf15929:41713 {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,41713,1734382821274-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,288 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-16T21:00:23,288 INFO [RS:0;6b259cf15929:42323 {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,42323,1734382821139-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:23,302 WARN [6b259cf15929:34343 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-12-16T21:00:23,306 INFO [RS:1;6b259cf15929:36861 {}] regionserver.Replication(204): 6b259cf15929,36861,1734382821216 started 2024-12-16T21:00:23,306 INFO [RS:2;6b259cf15929:41713 {}] regionserver.Replication(204): 6b259cf15929,41713,1734382821274 started 2024-12-16T21:00:23,306 INFO [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(1767): Serving as 6b259cf15929,36861,1734382821216, RpcServer on 6b259cf15929/172.17.0.2:36861, sessionid=0x1000b4e97660002 2024-12-16T21:00:23,306 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1767): Serving as 6b259cf15929,41713,1734382821274, RpcServer on 6b259cf15929/172.17.0.2:41713, sessionid=0x1000b4e97660003 2024-12-16T21:00:23,307 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-16T21:00:23,307 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-16T21:00:23,307 DEBUG [RS:1;6b259cf15929:36861 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6b259cf15929,36861,1734382821216 2024-12-16T21:00:23,307 DEBUG [RS:2;6b259cf15929:41713 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6b259cf15929,41713,1734382821274 2024-12-16T21:00:23,307 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6b259cf15929,36861,1734382821216' 2024-12-16T21:00:23,307 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6b259cf15929,41713,1734382821274' 2024-12-16T21:00:23,307 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-16T21:00:23,307 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-16T21:00:23,308 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-16T21:00:23,308 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-16T21:00:23,310 DEBUG [RS:2;6b259cf15929:41713 {}] 
procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-16T21:00:23,311 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-16T21:00:23,311 DEBUG [RS:2;6b259cf15929:41713 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6b259cf15929,41713,1734382821274 2024-12-16T21:00:23,311 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6b259cf15929,41713,1734382821274' 2024-12-16T21:00:23,311 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-16T21:00:23,311 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-16T21:00:23,311 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-16T21:00:23,311 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-16T21:00:23,312 DEBUG [RS:1;6b259cf15929:36861 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6b259cf15929,36861,1734382821216 2024-12-16T21:00:23,312 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6b259cf15929,36861,1734382821216' 2024-12-16T21:00:23,312 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-16T21:00:23,312 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-16T21:00:23,312 DEBUG [RS:2;6b259cf15929:41713 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-16T21:00:23,312 INFO [RS:2;6b259cf15929:41713 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-16T21:00:23,312 INFO [RS:2;6b259cf15929:41713 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-16T21:00:23,313 DEBUG [RS:1;6b259cf15929:36861 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-16T21:00:23,313 INFO [RS:1;6b259cf15929:36861 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-16T21:00:23,313 INFO [RS:1;6b259cf15929:36861 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-16T21:00:23,322 INFO [RS:0;6b259cf15929:42323 {}] regionserver.Replication(204): 6b259cf15929,42323,1734382821139 started 2024-12-16T21:00:23,322 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1767): Serving as 6b259cf15929,42323,1734382821139, RpcServer on 6b259cf15929/172.17.0.2:42323, sessionid=0x1000b4e97660001 2024-12-16T21:00:23,322 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-16T21:00:23,322 DEBUG [RS:0;6b259cf15929:42323 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6b259cf15929,42323,1734382821139 2024-12-16T21:00:23,322 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6b259cf15929,42323,1734382821139' 2024-12-16T21:00:23,322 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-16T21:00:23,323 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-16T21:00:23,324 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-16T21:00:23,324 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-16T21:00:23,324 DEBUG [RS:0;6b259cf15929:42323 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6b259cf15929,42323,1734382821139 2024-12-16T21:00:23,324 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6b259cf15929,42323,1734382821139' 2024-12-16T21:00:23,324 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-16T21:00:23,325 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-16T21:00:23,325 DEBUG [RS:0;6b259cf15929:42323 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-16T21:00:23,325 INFO [RS:0;6b259cf15929:42323 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-16T21:00:23,325 INFO [RS:0;6b259cf15929:42323 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-16T21:00:23,419 INFO [RS:1;6b259cf15929:36861 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-16T21:00:23,419 INFO [RS:2;6b259cf15929:41713 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-16T21:00:23,423 INFO [RS:1;6b259cf15929:36861 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6b259cf15929%2C36861%2C1734382821216, suffix=, logDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,36861,1734382821216, archiveDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/oldWALs, maxLogs=32 2024-12-16T21:00:23,424 INFO [RS:2;6b259cf15929:41713 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6b259cf15929%2C41713%2C1734382821274, suffix=, logDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,41713,1734382821274, archiveDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/oldWALs, maxLogs=32 2024-12-16T21:00:23,426 INFO [RS:0;6b259cf15929:42323 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-16T21:00:23,433 INFO [RS:0;6b259cf15929:42323 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6b259cf15929%2C42323%2C1734382821139, suffix=, logDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,42323,1734382821139, archiveDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/oldWALs, maxLogs=32 2024-12-16T21:00:23,449 DEBUG [RS:1;6b259cf15929:36861 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,36861,1734382821216/6b259cf15929%2C36861%2C1734382821216.1734382823427, exclude list is [], retry=0 2024-12-16T21:00:23,451 DEBUG [RS:2;6b259cf15929:41713 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,41713,1734382821274/6b259cf15929%2C41713%2C1734382821274.1734382823428, exclude list is [], retry=0 2024-12-16T21:00:23,454 DEBUG [RS:0;6b259cf15929:42323 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,42323,1734382821139/6b259cf15929%2C42323%2C1734382821139.1734382823435, exclude list is [], retry=0 2024-12-16T21:00:23,457 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39445,DS-cad7b0df-8300-46e6-8020-3278417c175b,DISK] 2024-12-16T21:00:23,457 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33147,DS-d9807c4d-d3e1-401e-be38-1a140a301b09,DISK] 2024-12-16T21:00:23,458 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44571,DS-0cf8bf6a-38e2-4ed2-80f6-0482d7a64cda,DISK] 
2024-12-16T21:00:23,461 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44571,DS-0cf8bf6a-38e2-4ed2-80f6-0482d7a64cda,DISK] 2024-12-16T21:00:23,461 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33147,DS-d9807c4d-d3e1-401e-be38-1a140a301b09,DISK] 2024-12-16T21:00:23,461 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39445,DS-cad7b0df-8300-46e6-8020-3278417c175b,DISK] 2024-12-16T21:00:23,473 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44571,DS-0cf8bf6a-38e2-4ed2-80f6-0482d7a64cda,DISK] 2024-12-16T21:00:23,473 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33147,DS-d9807c4d-d3e1-401e-be38-1a140a301b09,DISK] 2024-12-16T21:00:23,473 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39445,DS-cad7b0df-8300-46e6-8020-3278417c175b,DISK] 2024-12-16T21:00:23,541 INFO [RS:1;6b259cf15929:36861 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,36861,1734382821216/6b259cf15929%2C36861%2C1734382821216.1734382823427 2024-12-16T21:00:23,550 DEBUG [RS:1;6b259cf15929:36861 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43731:43731),(127.0.0.1/127.0.0.1:39437:39437),(127.0.0.1/127.0.0.1:44331:44331)] 2024-12-16T21:00:23,551 INFO [RS:2;6b259cf15929:41713 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,41713,1734382821274/6b259cf15929%2C41713%2C1734382821274.1734382823428 2024-12-16T21:00:23,555 INFO [RS:0;6b259cf15929:42323 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,42323,1734382821139/6b259cf15929%2C42323%2C1734382821139.1734382823435 2024-12-16T21:00:23,558 DEBUG [RS:2;6b259cf15929:41713 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39437:39437),(127.0.0.1/127.0.0.1:43731:43731),(127.0.0.1/127.0.0.1:44331:44331)] 2024-12-16T21:00:23,558 DEBUG [RS:0;6b259cf15929:42323 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43731:43731),(127.0.0.1/127.0.0.1:39437:39437),(127.0.0.1/127.0.0.1:44331:44331)] 2024-12-16T21:00:23,803 DEBUG [6b259cf15929:34343 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-16T21:00:23,806 DEBUG [6b259cf15929:34343 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:00:23,812 DEBUG 
[6b259cf15929:34343 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:00:23,812 DEBUG [6b259cf15929:34343 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:00:23,812 DEBUG [6b259cf15929:34343 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:00:23,812 INFO [6b259cf15929:34343 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:00:23,812 INFO [6b259cf15929:34343 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:00:23,812 INFO [6b259cf15929:34343 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:00:23,812 DEBUG [6b259cf15929:34343 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:00:23,817 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:00:23,822 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6b259cf15929,41713,1734382821274, state=OPENING 2024-12-16T21:00:23,827 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-16T21:00:23,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:23,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:23,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:23,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:23,830 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T21:00:23,830 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T21:00:23,830 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T21:00:23,830 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T21:00:23,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:00:24,008 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:00:24,010 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T21:00:24,012 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:47144, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T21:00:24,023 INFO [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-16T21:00:24,023 INFO [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-16T21:00:24,024 INFO [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-16T21:00:24,027 INFO [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6b259cf15929%2C41713%2C1734382821274.meta, suffix=.meta, logDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,41713,1734382821274, archiveDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/oldWALs, maxLogs=32 2024-12-16T21:00:24,047 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,41713,1734382821274/6b259cf15929%2C41713%2C1734382821274.meta.1734382824029.meta, exclude list is [], retry=0 2024-12-16T21:00:24,051 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39445,DS-cad7b0df-8300-46e6-8020-3278417c175b,DISK] 2024-12-16T21:00:24,051 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44571,DS-0cf8bf6a-38e2-4ed2-80f6-0482d7a64cda,DISK] 2024-12-16T21:00:24,051 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33147,DS-d9807c4d-d3e1-401e-be38-1a140a301b09,DISK] 2024-12-16T21:00:24,057 INFO [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/WALs/6b259cf15929,41713,1734382821274/6b259cf15929%2C41713%2C1734382821274.meta.1734382824029.meta 2024-12-16T21:00:24,060 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43731:43731),(127.0.0.1/127.0.0.1:39437:39437),(127.0.0.1/127.0.0.1:44331:44331)] 2024-12-16T21:00:24,060 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-16T21:00:24,061 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-16T21:00:24,062 INFO 
[RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:00:24,063 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-16T21:00:24,064 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-16T21:00:24,065 INFO [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-16T21:00:24,076 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-16T21:00:24,076 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:24,076 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-16T21:00:24,076 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-16T21:00:24,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-16T21:00:24,084 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-16T21:00:24,084 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:24,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T21:00:24,086 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-16T21:00:24,087 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-16T21:00:24,087 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:24,088 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T21:00:24,088 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-16T21:00:24,090 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-16T21:00:24,090 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:24,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T21:00:24,093 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740 2024-12-16T21:00:24,096 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740 
2024-12-16T21:00:24,099 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-16T21:00:24,101 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-16T21:00:24,106 INFO [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69085351, jitterRate=0.029451951384544373}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-16T21:00:24,110 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-16T21:00:24,118 INFO [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734382824003 2024-12-16T21:00:24,131 DEBUG [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-16T21:00:24,132 INFO [RS_OPEN_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-16T21:00:24,133 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:00:24,138 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6b259cf15929,41713,1734382821274, state=OPEN 2024-12-16T21:00:24,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-16T21:00:24,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-16T21:00:24,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-16T21:00:24,142 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T21:00:24,142 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T21:00:24,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-16T21:00:24,142 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): 
Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T21:00:24,142 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T21:00:24,147 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-16T21:00:24,147 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=6b259cf15929,41713,1734382821274 in 308 msec 2024-12-16T21:00:24,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-16T21:00:24,154 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0160 sec 2024-12-16T21:00:24,159 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.2760 sec 2024-12-16T21:00:24,159 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734382824159, completionTime=-1 2024-12-16T21:00:24,159 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-16T21:00:24,159 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-16T21:00:24,204 DEBUG [hconnection-0x70b78df7-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:24,206 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:24,222 INFO [master/6b259cf15929:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-12-16T21:00:24,222 INFO [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734382884222 2024-12-16T21:00:24,222 INFO [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734382944222 2024-12-16T21:00:24,222 INFO [master/6b259cf15929:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 62 msec 2024-12-16T21:00:24,243 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-16T21:00:24,252 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,34343,1734382820356-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:24,253 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,34343,1734382820356-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-16T21:00:24,253 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,34343,1734382820356-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:24,255 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6b259cf15929:34343, period=300000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:24,255 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:24,289 DEBUG [master/6b259cf15929:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-16T21:00:24,291 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 2024-12-16T21:00:24,292 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-16T21:00:24,300 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-16T21:00:24,303 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:00:24,304 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:24,307 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:00:24,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741837_1013 (size=358) 2024-12-16T21:00:24,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741837_1013 (size=358) 2024-12-16T21:00:24,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741837_1013 (size=358) 2024-12-16T21:00:24,344 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ffdc28263b845c2b16a120a0817fe8a5, NAME => 'hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:24,360 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741838_1014 (size=42) 2024-12-16T21:00:24,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741838_1014 (size=42) 2024-12-16T21:00:24,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741838_1014 (size=42) 2024-12-16T21:00:24,362 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:24,362 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing ffdc28263b845c2b16a120a0817fe8a5, disabling compactions & flushes 2024-12-16T21:00:24,362 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 2024-12-16T21:00:24,362 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 2024-12-16T21:00:24,362 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. after waiting 0 ms 2024-12-16T21:00:24,363 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 2024-12-16T21:00:24,363 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 2024-12-16T21:00:24,363 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for ffdc28263b845c2b16a120a0817fe8a5: 2024-12-16T21:00:24,365 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:00:24,371 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734382824366"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382824366"}]},"ts":"1734382824366"} 2024-12-16T21:00:24,396 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-16T21:00:24,398 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:00:24,401 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382824398"}]},"ts":"1734382824398"} 2024-12-16T21:00:24,405 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-16T21:00:24,409 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:00:24,410 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:00:24,410 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:00:24,410 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:00:24,410 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:00:24,410 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:00:24,410 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:00:24,410 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:00:24,412 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ffdc28263b845c2b16a120a0817fe8a5, ASSIGN}] 2024-12-16T21:00:24,414 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ffdc28263b845c2b16a120a0817fe8a5, ASSIGN 2024-12-16T21:00:24,416 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=ffdc28263b845c2b16a120a0817fe8a5, ASSIGN; state=OFFLINE, location=6b259cf15929,42323,1734382821139; forceNewPlan=false, retain=false 2024-12-16T21:00:24,567 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-16T21:00:24,567 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ffdc28263b845c2b16a120a0817fe8a5, regionState=OPENING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:00:24,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure ffdc28263b845c2b16a120a0817fe8a5, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:00:24,725 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:00:24,725 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T21:00:24,728 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48684, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T21:00:24,734 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 2024-12-16T21:00:24,735 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => ffdc28263b845c2b16a120a0817fe8a5, NAME => 'hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5.', STARTKEY => '', ENDKEY => ''} 2024-12-16T21:00:24,735 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. service=AccessControlService 2024-12-16T21:00:24,735 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T21:00:24,736 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace ffdc28263b845c2b16a120a0817fe8a5 2024-12-16T21:00:24,736 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:24,736 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for ffdc28263b845c2b16a120a0817fe8a5 2024-12-16T21:00:24,736 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for ffdc28263b845c2b16a120a0817fe8a5 2024-12-16T21:00:24,739 INFO [StoreOpener-ffdc28263b845c2b16a120a0817fe8a5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ffdc28263b845c2b16a120a0817fe8a5 2024-12-16T21:00:24,741 INFO [StoreOpener-ffdc28263b845c2b16a120a0817fe8a5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ffdc28263b845c2b16a120a0817fe8a5 columnFamilyName info 2024-12-16T21:00:24,741 DEBUG [StoreOpener-ffdc28263b845c2b16a120a0817fe8a5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:24,742 INFO [StoreOpener-ffdc28263b845c2b16a120a0817fe8a5-1 {}] regionserver.HStore(327): Store=ffdc28263b845c2b16a120a0817fe8a5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:00:24,744 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/namespace/ffdc28263b845c2b16a120a0817fe8a5 2024-12-16T21:00:24,745 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/namespace/ffdc28263b845c2b16a120a0817fe8a5 2024-12-16T21:00:24,749 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, 
pid=6}] regionserver.HRegion(1085): writing seq id for ffdc28263b845c2b16a120a0817fe8a5 2024-12-16T21:00:24,755 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/namespace/ffdc28263b845c2b16a120a0817fe8a5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:00:24,756 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened ffdc28263b845c2b16a120a0817fe8a5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60620885, jitterRate=-0.09667842090129852}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:00:24,757 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for ffdc28263b845c2b16a120a0817fe8a5: 2024-12-16T21:00:24,766 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5., pid=6, masterSystemTime=1734382824725 2024-12-16T21:00:24,770 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 2024-12-16T21:00:24,770 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 
2024-12-16T21:00:24,771 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ffdc28263b845c2b16a120a0817fe8a5, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:00:24,779 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-16T21:00:24,779 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure ffdc28263b845c2b16a120a0817fe8a5, server=6b259cf15929,42323,1734382821139 in 204 msec 2024-12-16T21:00:24,783 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-16T21:00:24,783 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=ffdc28263b845c2b16a120a0817fe8a5, ASSIGN in 367 msec 2024-12-16T21:00:24,785 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:00:24,785 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382824785"}]},"ts":"1734382824785"} 2024-12-16T21:00:24,787 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-16T21:00:24,791 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:00:24,794 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 498 msec 2024-12-16T21:00:24,803 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-16T21:00:24,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:24,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-16T21:00:24,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:24,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:24,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:24,829 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:24,831 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48690, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:24,839 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-16T21:00:24,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-16T21:00:24,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 24 msec 2024-12-16T21:00:24,873 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-16T21:00:24,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-16T21:00:24,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-12-16T21:00:24,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-16T21:00:24,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-16T21:00:24,902 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.517sec 2024-12-16T21:00:24,905 INFO [master/6b259cf15929:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-16T21:00:24,906 INFO [master/6b259cf15929:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-16T21:00:24,907 INFO [master/6b259cf15929:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-16T21:00:24,908 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-16T21:00:24,908 INFO [master/6b259cf15929:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-16T21:00:24,909 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,34343,1734382820356-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-16T21:00:24,909 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,34343,1734382820356-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-16T21:00:24,924 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.HMaster$4(2389): Client=null/null create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-16T21:00:24,926 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:acl 2024-12-16T21:00:24,927 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:00:24,928 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:24,928 INFO [master/6b259cf15929:0:becomeActiveMaster {}] master.MasterRpcServices(713): Client=null/null procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 9 2024-12-16T21:00:24,929 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:00:24,931 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T21:00:24,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741839_1015 (size=349) 2024-12-16T21:00:24,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741839_1015 (size=349) 2024-12-16T21:00:24,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741839_1015 (size=349) 2024-12-16T21:00:24,947 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5ff33ba35f8500425d07b6bfd353cd16, NAME => 'hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:24,950 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e74344 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76ebc6b4 2024-12-16T21:00:24,951 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): 
ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-16T21:00:24,959 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17c05315, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:24,964 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-16T21:00:24,964 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-16T21:00:24,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741840_1016 (size=36) 2024-12-16T21:00:24,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741840_1016 (size=36) 2024-12-16T21:00:24,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741840_1016 (size=36) 2024-12-16T21:00:24,974 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:24,974 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1681): Closing 5ff33ba35f8500425d07b6bfd353cd16, disabling compactions & flushes 2024-12-16T21:00:24,974 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 2024-12-16T21:00:24,974 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 2024-12-16T21:00:24,974 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. after waiting 0 ms 2024-12-16T21:00:24,974 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 2024-12-16T21:00:24,974 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1922): Closed hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 2024-12-16T21:00:24,975 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5ff33ba35f8500425d07b6bfd353cd16: 2024-12-16T21:00:24,977 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:00:24,977 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1734382824977"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382824977"}]},"ts":"1734382824977"} 2024-12-16T21:00:24,981 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-16T21:00:24,982 DEBUG [hconnection-0x35b717f-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:24,983 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:00:24,983 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382824983"}]},"ts":"1734382824983"} 2024-12-16T21:00:24,986 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-16T21:00:24,990 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:00:24,992 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:00:24,992 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:00:24,992 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:00:24,992 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:00:24,992 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:00:24,992 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:00:24,992 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:00:24,993 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=5ff33ba35f8500425d07b6bfd353cd16, ASSIGN}] 2024-12-16T21:00:24,996 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=5ff33ba35f8500425d07b6bfd353cd16, ASSIGN 2024-12-16T21:00:24,997 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47170, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:24,999 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:acl, region=5ff33ba35f8500425d07b6bfd353cd16, ASSIGN; state=OFFLINE, location=6b259cf15929,42323,1734382821139; forceNewPlan=false, retain=false 2024-12-16T21:00:25,002 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=6b259cf15929,34343,1734382820356 2024-12-16T21:00:25,002 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2790): Starting mini mapreduce cluster... 
2024-12-16T21:00:25,002 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/test.cache.data in system properties and HBase conf 2024-12-16T21:00:25,002 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.tmp.dir in system properties and HBase conf 2024-12-16T21:00:25,002 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir in system properties and HBase conf 2024-12-16T21:00:25,002 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-16T21:00:25,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-16T21:00:25,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-16T21:00:25,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-16T21:00:25,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-16T21:00:25,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-16T21:00:25,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-16T21:00:25,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-16T21:00:25,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-16T21:00:25,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-16T21:00:25,004 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-16T21:00:25,004 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-16T21:00:25,004 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/nfs.dump.dir in system properties and HBase conf 2024-12-16T21:00:25,004 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/java.io.tmpdir in system properties and HBase conf 2024-12-16T21:00:25,004 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-16T21:00:25,004 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-16T21:00:25,004 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-16T21:00:25,033 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T21:00:25,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741841_1017 (size=592039) 2024-12-16T21:00:25,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741841_1017 (size=592039) 2024-12-16T21:00:25,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741841_1017 (size=592039) 2024-12-16T21:00:25,105 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741842_1018 (size=1663647) 2024-12-16T21:00:25,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741842_1018 (size=1663647) 2024-12-16T21:00:25,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741842_1018 (size=1663647) 2024-12-16T21:00:25,150 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-16T21:00:25,150 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=5ff33ba35f8500425d07b6bfd353cd16, regionState=OPENING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:00:25,153 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 5ff33ba35f8500425d07b6bfd353cd16, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:00:25,233 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T21:00:25,323 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:00:25,364 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(135): Open hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 2024-12-16T21:00:25,365 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 5ff33ba35f8500425d07b6bfd353cd16, NAME => 'hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16.', STARTKEY => '', ENDKEY => ''} 2024-12-16T21:00:25,366 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. service=AccessControlService 2024-12-16T21:00:25,366 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T21:00:25,366 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 5ff33ba35f8500425d07b6bfd353cd16 2024-12-16T21:00:25,366 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(894): Instantiated hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:25,367 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 5ff33ba35f8500425d07b6bfd353cd16 2024-12-16T21:00:25,367 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 5ff33ba35f8500425d07b6bfd353cd16 2024-12-16T21:00:25,369 INFO [StoreOpener-5ff33ba35f8500425d07b6bfd353cd16-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 5ff33ba35f8500425d07b6bfd353cd16 2024-12-16T21:00:25,372 INFO [StoreOpener-5ff33ba35f8500425d07b6bfd353cd16-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ff33ba35f8500425d07b6bfd353cd16 columnFamilyName l 2024-12-16T21:00:25,372 DEBUG [StoreOpener-5ff33ba35f8500425d07b6bfd353cd16-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:25,373 INFO [StoreOpener-5ff33ba35f8500425d07b6bfd353cd16-1 {}] regionserver.HStore(327): Store=5ff33ba35f8500425d07b6bfd353cd16/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:00:25,376 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/acl/5ff33ba35f8500425d07b6bfd353cd16 2024-12-16T21:00:25,377 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/acl/5ff33ba35f8500425d07b6bfd353cd16 2024-12-16T21:00:25,382 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] 
regionserver.HRegion(1085): writing seq id for 5ff33ba35f8500425d07b6bfd353cd16 2024-12-16T21:00:25,386 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/acl/5ff33ba35f8500425d07b6bfd353cd16/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:00:25,387 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1102): Opened 5ff33ba35f8500425d07b6bfd353cd16; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62442156, jitterRate=-0.06953936815261841}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:00:25,390 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 5ff33ba35f8500425d07b6bfd353cd16: 2024-12-16T21:00:25,391 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16., pid=11, masterSystemTime=1734382825322 2024-12-16T21:00:25,394 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 2024-12-16T21:00:25,394 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(164): Opened hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 
2024-12-16T21:00:25,396 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=5ff33ba35f8500425d07b6bfd353cd16, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:00:25,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-16T21:00:25,407 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 5ff33ba35f8500425d07b6bfd353cd16, server=6b259cf15929,42323,1734382821139 in 249 msec 2024-12-16T21:00:25,412 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:00:25,413 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382825413"}]},"ts":"1734382825413"} 2024-12-16T21:00:25,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-16T21:00:25,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=hbase:acl, region=5ff33ba35f8500425d07b6bfd353cd16, ASSIGN in 415 msec 2024-12-16T21:00:25,416 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-16T21:00:25,421 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:00:25,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=hbase:acl in 498 msec 2024-12-16T21:00:25,538 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T21:00:25,538 INFO [master/6b259cf15929:0:becomeActiveMaster {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: hbase:acl, procId: 9 completed 2024-12-16T21:00:25,559 DEBUG [master/6b259cf15929:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-16T21:00:25,561 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-16T21:00:25,561 INFO [master/6b259cf15929:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6b259cf15929,34343,1734382820356-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-16T21:00:26,891 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:27,017 WARN [Thread-398 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:27,275 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T21:00:27,279 WARN [Thread-398 {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-16T21:00:27,280 INFO [Thread-398 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T21:00:27,301 INFO [Thread-398 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T21:00:27,301 INFO [Thread-398 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T21:00:27,302 INFO [Thread-398 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-16T21:00:27,323 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a809efb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,AVAILABLE} 2024-12-16T21:00:27,323 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15266e52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-16T21:00:27,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T21:00:27,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T21:00:27,332 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-16T21:00:27,338 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:27,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39328466{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,AVAILABLE} 2024-12-16T21:00:27,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@268d8bfd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-16T21:00:27,504 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class 2024-12-16T21:00:27,504 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class 2024-12-16T21:00:27,504 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-16T21:00:27,508 INFO [Thread-398 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-16T21:00:27,563 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T21:00:27,813 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T21:00:28,114 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest" 2024-12-16T21:00:28,145 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c30d1c{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/java.io.tmpdir/jetty-localhost-41793-hadoop-yarn-common-3_4_1_jar-_-any-13503435052725345561/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-16T21:00:28,145 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@31993999{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/java.io.tmpdir/jetty-localhost-46707-hadoop-yarn-common-3_4_1_jar-_-any-11543010031394173186/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-16T21:00:28,146 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64e87700{HTTP/1.1, (http/1.1)}{localhost:41793} 2024-12-16T21:00:28,146 INFO [Thread-398 {}] server.AbstractConnector(333): Started ServerConnector@49999e44{HTTP/1.1, (http/1.1)}{localhost:46707} 2024-12-16T21:00:28,146 INFO [Time-limited 
test {}] server.Server(415): Started @14633ms 2024-12-16T21:00:28,146 INFO [Thread-398 {}] server.Server(415): Started @14633ms 2024-12-16T21:00:28,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741843_1019 (size=5) 2024-12-16T21:00:28,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741843_1019 (size=5) 2024-12-16T21:00:28,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741843_1019 (size=5) 2024-12-16T21:00:29,230 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-16T21:00:29,236 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:29,276 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-16T21:00:29,277 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T21:00:29,285 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T21:00:29,285 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T21:00:29,285 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-16T21:00:29,286 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:29,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f570563{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,AVAILABLE} 2024-12-16T21:00:29,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e6d339c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-16T21:00:29,365 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-16T21:00:29,366 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-16T21:00:29,366 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-16T21:00:29,366 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-16T21:00:29,378 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T21:00:29,403 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T21:00:29,575 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T21:00:29,590 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6b8e94fd{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/java.io.tmpdir/jetty-localhost-32843-hadoop-yarn-common-3_4_1_jar-_-any-2852369111166655369/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-16T21:00:29,591 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1cd22596{HTTP/1.1, (http/1.1)}{localhost:32843} 2024-12-16T21:00:29,591 INFO [Time-limited test {}] server.Server(415): Started @16078ms 2024-12-16T21:00:29,592 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:00:29,681 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-16T21:00:29,683 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-16T21:00:29,685 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): 
Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-16T21:00:29,885 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-16T21:00:29,887 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:29,901 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-16T21:00:29,902 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T21:00:29,910 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T21:00:29,910 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T21:00:29,910 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-16T21:00:29,912 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T21:00:29,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b029b2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,AVAILABLE} 2024-12-16T21:00:29,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b38e25f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-16T21:00:29,974 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-16T21:00:29,975 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-16T21:00:29,975 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-16T21:00:29,975 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-16T21:00:29,993 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T21:00:30,002 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T21:00:30,115 INFO [Time-limited test {}] 
container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T21:00:30,121 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4792a562{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/java.io.tmpdir/jetty-localhost-35853-hadoop-yarn-common-3_4_1_jar-_-any-13295984476519493654/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-16T21:00:30,122 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cad8bdd{HTTP/1.1, (http/1.1)}{localhost:35853} 2024-12-16T21:00:30,122 INFO [Time-limited test {}] server.Server(415): Started @16609ms 2024-12-16T21:00:30,160 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2825): Mini mapreduce cluster started 2024-12-16T21:00:30,161 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:00:30,192 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=723, OpenFileDescriptor=783, MaxFileDescriptor=1048576, SystemLoadAverage=152, ProcessCount=11, AvailableMemoryMB=6269 2024-12-16T21:00:30,192 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=723 is superior to 500 2024-12-16T21:00:30,211 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-16T21:00:30,216 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41110, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-16T21:00:30,224 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:00:30,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-16T21:00:30,228 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:00:30,228 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 12 2024-12-16T21:00:30,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T21:00:30,234 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:30,236 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:00:30,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741844_1020 (size=406) 2024-12-16T21:00:30,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741844_1020 (size=406) 2024-12-16T21:00:30,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741844_1020 (size=406) 2024-12-16T21:00:30,302 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3c10ba856da399deffd7d994922192f1, NAME => 'testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:30,304 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 45a6ef929e2b00d4f529d744a8eceb56, NAME => 'testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:30,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T21:00:30,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741845_1021 (size=67) 2024-12-16T21:00:30,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741845_1021 (size=67) 2024-12-16T21:00:30,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741845_1021 (size=67) 2024-12-16T21:00:30,361 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; 
minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:30,362 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1681): Closing 45a6ef929e2b00d4f529d744a8eceb56, disabling compactions & flushes 2024-12-16T21:00:30,362 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:30,362 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:30,362 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. after waiting 0 ms 2024-12-16T21:00:30,362 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:30,362 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:30,362 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1635): Region close journal for 45a6ef929e2b00d4f529d744a8eceb56: 2024-12-16T21:00:30,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741846_1022 (size=67) 2024-12-16T21:00:30,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741846_1022 (size=67) 2024-12-16T21:00:30,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741846_1022 (size=67) 2024-12-16T21:00:30,379 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:30,380 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1681): Closing 3c10ba856da399deffd7d994922192f1, disabling compactions & flushes 2024-12-16T21:00:30,380 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:30,380 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:30,380 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 
after waiting 0 ms 2024-12-16T21:00:30,380 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:30,380 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:30,380 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3c10ba856da399deffd7d994922192f1: 2024-12-16T21:00:30,382 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:00:30,383 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734382830383"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382830383"}]},"ts":"1734382830383"} 2024-12-16T21:00:30,383 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734382830383"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382830383"}]},"ts":"1734382830383"} 2024-12-16T21:00:30,422 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T21:00:30,423 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:00:30,424 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382830424"}]},"ts":"1734382830424"} 2024-12-16T21:00:30,427 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-16T21:00:30,432 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:00:30,437 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:00:30,437 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:00:30,437 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:00:30,437 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:00:30,437 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:00:30,437 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:00:30,437 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:00:30,438 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, 
region=3c10ba856da399deffd7d994922192f1, ASSIGN}, {pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=45a6ef929e2b00d4f529d744a8eceb56, ASSIGN}] 2024-12-16T21:00:30,443 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=45a6ef929e2b00d4f529d744a8eceb56, ASSIGN 2024-12-16T21:00:30,443 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3c10ba856da399deffd7d994922192f1, ASSIGN 2024-12-16T21:00:30,445 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=45a6ef929e2b00d4f529d744a8eceb56, ASSIGN; state=OFFLINE, location=6b259cf15929,41713,1734382821274; forceNewPlan=false, retain=false 2024-12-16T21:00:30,445 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3c10ba856da399deffd7d994922192f1, ASSIGN; state=OFFLINE, location=6b259cf15929,36861,1734382821216; forceNewPlan=false, retain=false 2024-12-16T21:00:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T21:00:30,595 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-16T21:00:30,596 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=3c10ba856da399deffd7d994922192f1, regionState=OPENING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:00:30,596 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=45a6ef929e2b00d4f529d744a8eceb56, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:00:30,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; OpenRegionProcedure 45a6ef929e2b00d4f529d744a8eceb56, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:00:30,602 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE; OpenRegionProcedure 3c10ba856da399deffd7d994922192f1, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:00:30,753 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:00:30,756 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:00:30,757 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T21:00:30,782 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51054, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T21:00:30,783 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:30,784 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7285): Opening region: {ENCODED => 45a6ef929e2b00d4f529d744a8eceb56, NAME => 'testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T21:00:30,784 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. service=AccessControlService 2024-12-16T21:00:30,784 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T21:00:30,785 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:30,785 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:30,785 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7327): checking encryption for 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:30,785 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7330): checking classloading for 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:30,787 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:30,787 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7285): Opening region: {ENCODED => 3c10ba856da399deffd7d994922192f1, NAME => 'testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T21:00:30,788 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. service=AccessControlService 2024-12-16T21:00:30,788 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T21:00:30,788 INFO [StoreOpener-45a6ef929e2b00d4f529d744a8eceb56-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:30,788 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:30,788 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:30,788 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7327): checking encryption for 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:30,788 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7330): checking classloading for 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:30,790 INFO [StoreOpener-45a6ef929e2b00d4f529d744a8eceb56-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45a6ef929e2b00d4f529d744a8eceb56 columnFamilyName cf 2024-12-16T21:00:30,790 DEBUG [StoreOpener-45a6ef929e2b00d4f529d744a8eceb56-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:30,791 INFO [StoreOpener-45a6ef929e2b00d4f529d744a8eceb56-1 {}] regionserver.HStore(327): Store=45a6ef929e2b00d4f529d744a8eceb56/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:00:30,792 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:30,793 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:30,796 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1085): 
writing seq id for 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:30,804 INFO [StoreOpener-3c10ba856da399deffd7d994922192f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:30,805 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:00:30,806 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1102): Opened 45a6ef929e2b00d4f529d744a8eceb56; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71432890, jitterRate=0.0644330084323883}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:00:30,807 INFO [StoreOpener-3c10ba856da399deffd7d994922192f1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c10ba856da399deffd7d994922192f1 columnFamilyName cf 2024-12-16T21:00:30,807 DEBUG [StoreOpener-3c10ba856da399deffd7d994922192f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:30,808 INFO [StoreOpener-3c10ba856da399deffd7d994922192f1-1 {}] regionserver.HStore(327): Store=3c10ba856da399deffd7d994922192f1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:00:30,808 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1001): Region open journal for 45a6ef929e2b00d4f529d744a8eceb56: 2024-12-16T21:00:30,809 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:30,809 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56., pid=15, masterSystemTime=1734382830753 2024-12-16T21:00:30,810 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) 
under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:30,812 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:30,812 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:30,813 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=45a6ef929e2b00d4f529d744a8eceb56, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:00:30,813 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1085): writing seq id for 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:30,817 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:00:30,818 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1102): Opened 3c10ba856da399deffd7d994922192f1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74100023, jitterRate=0.10417638719081879}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:00:30,818 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1001): Region open journal for 3c10ba856da399deffd7d994922192f1: 2024-12-16T21:00:30,819 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-16T21:00:30,819 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; OpenRegionProcedure 45a6ef929e2b00d4f529d744a8eceb56, server=6b259cf15929,41713,1734382821274 in 217 msec 2024-12-16T21:00:30,820 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1., pid=16, masterSystemTime=1734382830756 2024-12-16T21:00:30,822 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:30,822 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 
2024-12-16T21:00:30,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=45a6ef929e2b00d4f529d744a8eceb56, ASSIGN in 381 msec 2024-12-16T21:00:30,823 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=3c10ba856da399deffd7d994922192f1, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:00:30,828 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=13 2024-12-16T21:00:30,828 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=13, state=SUCCESS; OpenRegionProcedure 3c10ba856da399deffd7d994922192f1, server=6b259cf15929,36861,1734382821216 in 224 msec 2024-12-16T21:00:30,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-16T21:00:30,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3c10ba856da399deffd7d994922192f1, ASSIGN in 390 msec 2024-12-16T21:00:30,832 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:00:30,833 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382830832"}]},"ts":"1734382830832"} 2024-12-16T21:00:30,835 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-16T21:00:30,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T21:00:30,839 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:00:30,843 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-16T21:00:30,851 DEBUG [hconnection-0x4adeb78-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:30,852 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55008, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-16T21:00:30,862 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-16T21:00:30,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:00:30,872 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-16T21:00:30,873 DEBUG [HBase-Metrics2-1 
{}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-16T21:00:30,873 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-16T21:00:30,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:00:30,874 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-16T21:00:30,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-16T21:00:30,874 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-16T21:00:30,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-16T21:00:30,875 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-16T21:00:30,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-16T21:00:30,876 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-16T21:00:30,877 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:00:30,877 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-16T21:00:30,877 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-16T21:00:30,877 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-16T21:00:30,877 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-16T21:00:30,877 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-16T21:00:30,878 DEBUG 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-16T21:00:30,878 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-16T21:00:30,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:30,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:30,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:30,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:30,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:30,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:30,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:30,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:00:30,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:30,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:30,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-16T21:00:30,904 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 
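The PermissionStorage and ZKPermissionWatcher entries around this point show the table ACL (jenkins: RWXCA) being written to the acl table and pushed to every region server through the /hbase/acl znode. In this test run the owner grant is created implicitly during CreateTableProcedure; the following is only a minimal sketch of how an equivalent whole-table grant could be issued through the public client API, assuming the AccessController coprocessor is enabled. The user and table name are taken from the log; everything else is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTableAcl {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Whole-table grant: null family/qualifier applies the actions to the entire table.
          // READ/WRITE/EXEC/CREATE/ADMIN mirrors the "jenkins: RWXCA" entry seen in the log.
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportWithTargetName"),
              "jenkins", /* family */ null, /* qualifier */ null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }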
2024-12-16T21:00:30,905 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:30,905 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:30,907 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:30,909 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithTargetName in 681 msec 2024-12-16T21:00:31,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T21:00:31,341 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName, procId: 12 completed 2024-12-16T21:00:31,341 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-16T21:00:31,342 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:00:31,349 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-16T21:00:31,350 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:00:31,350 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithTargetName assigned. 2024-12-16T21:00:31,366 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-16T21:00:31,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382831366 (current time:1734382831366). 
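The snapshot request logged above ({ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }) is what the master receives when a client asks for a FLUSH-type snapshot; the SnapshotDescriptionUtils and SnapshotProcedure entries that follow show it being carried out as pid=17. A minimal client-side sketch of issuing such a request, assuming a reachable cluster and reusing the snapshot and table names from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a FLUSH-type snapshot; the master runs a SnapshotProcedure
          // (like pid=17 in the log) and this call returns once it completes.
          admin.snapshot("emptySnaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"),
              SnapshotType.FLUSH);
        }
      }
    }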
2024-12-16T21:00:31,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:00:31,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-16T21:00:31,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:00:31,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x27c34dcf to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37b58e67 2024-12-16T21:00:31,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22311d15, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:31,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:31,391 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:31,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x27c34dcf to 127.0.0.1:61742 2024-12-16T21:00:31,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:00:31,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1db27106 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c4c4459 2024-12-16T21:00:31,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c9b02a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:31,403 DEBUG [hconnection-0x65258866-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:31,404 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:31,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:31,407 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45642, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:31,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x1db27106 to 127.0.0.1:61742 2024-12-16T21:00:31,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:00:31,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-16T21:00:31,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:00:31,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=17, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-16T21:00:31,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-16T21:00:31,439 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:00:31,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-16T21:00:31,445 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:00:31,462 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:00:31,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741847_1023 (size=167) 2024-12-16T21:00:31,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741847_1023 (size=167) 2024-12-16T21:00:31,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741847_1023 (size=167) 2024-12-16T21:00:31,481 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:00:31,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 3c10ba856da399deffd7d994922192f1}, {pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 
45a6ef929e2b00d4f529d744a8eceb56}] 2024-12-16T21:00:31,488 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:31,488 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:31,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-16T21:00:31,645 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:00:31,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:00:31,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=18 2024-12-16T21:00:31,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=19 2024-12-16T21:00:31,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:31,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:31,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.HRegion(2538): Flush status journal for 3c10ba856da399deffd7d994922192f1: 2024-12-16T21:00:31,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 45a6ef929e2b00d4f529d744a8eceb56: 2024-12-16T21:00:31,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. for emptySnaptb0-testExportWithTargetName completed. 2024-12-16T21:00:31,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. for emptySnaptb0-testExportWithTargetName completed. 2024-12-16T21:00:31,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-16T21:00:31,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-16T21:00:31,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:00:31,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:00:31,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:00:31,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:00:31,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741849_1025 (size=70) 2024-12-16T21:00:31,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741848_1024 (size=70) 2024-12-16T21:00:31,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741848_1024 (size=70) 2024-12-16T21:00:31,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741848_1024 (size=70) 2024-12-16T21:00:31,693 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:31,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741849_1025 (size=70) 2024-12-16T21:00:31,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741849_1025 (size=70) 2024-12-16T21:00:31,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=18 2024-12-16T21:00:31,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=18 2024-12-16T21:00:31,698 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:31,699 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:31,699 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 
2024-12-16T21:00:31,699 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-16T21:00:31,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-16T21:00:31,701 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:31,702 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:31,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=17, state=SUCCESS; SnapshotRegionProcedure 3c10ba856da399deffd7d994922192f1 in 217 msec 2024-12-16T21:00:31,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=17 2024-12-16T21:00:31,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=17, state=SUCCESS; SnapshotRegionProcedure 45a6ef929e2b00d4f529d744a8eceb56 in 221 msec 2024-12-16T21:00:31,707 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:00:31,710 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:00:31,714 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:00:31,714 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-16T21:00:31,717 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-16T21:00:31,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741850_1026 (size=549) 2024-12-16T21:00:31,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741850_1026 (size=549) 2024-12-16T21:00:31,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741850_1026 (size=549) 2024-12-16T21:00:31,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-16T21:00:31,746 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:00:31,759 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:00:31,760 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-16T21:00:31,763 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:00:31,763 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-16T21:00:31,765 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 334 msec 2024-12-16T21:00:32,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-16T21:00:32,047 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 17 completed 2024-12-16T21:00:32,072 DEBUG [htable-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:32,076 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41713 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:00:32,076 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51062, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:32,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. with WAL disabled. Data may be lost in the event of a crash. 
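The two HRegion(8254) warnings just above are emitted because the test loads rows with WAL writes disabled to speed up the run. A minimal sketch of a client write that produces that warning, assuming the table and the cf:q column layout visible later in the log; the row key and value are made up for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithoutWal {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
          // SKIP_WAL trades durability for write speed; the region server logs the
          // "with WAL disabled. Data may be lost" warning for writes like this one.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }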
2024-12-16T21:00:32,087 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithTargetName 2024-12-16T21:00:32,088 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:32,089 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:00:32,116 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-16T21:00:32,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382832116 (current time:1734382832116). 2024-12-16T21:00:32,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:00:32,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-16T21:00:32,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:00:32,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x432165d3 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50f9d16d 2024-12-16T21:00:32,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@480dd96a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:32,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:32,126 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55024, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:32,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x432165d3 to 127.0.0.1:61742 2024-12-16T21:00:32,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:00:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7abf88f3 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64535c70 2024-12-16T21:00:32,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42b31f19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-16T21:00:32,134 DEBUG [hconnection-0x33de6731-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:32,136 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55038, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:32,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:32,139 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45648, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:32,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7abf88f3 to 127.0.0.1:61742 2024-12-16T21:00:32,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:00:32,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-16T21:00:32,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:00:32,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-16T21:00:32,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-16T21:00:32,145 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:00:32,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T21:00:32,147 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:00:32,151 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:00:32,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741851_1027 (size=162) 2024-12-16T21:00:32,166 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741851_1027 (size=162) 2024-12-16T21:00:32,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741851_1027 (size=162) 2024-12-16T21:00:32,169 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:00:32,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 3c10ba856da399deffd7d994922192f1}, {pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 45a6ef929e2b00d4f529d744a8eceb56}] 2024-12-16T21:00:32,171 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:32,171 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:32,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T21:00:32,323 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:00:32,323 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:00:32,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=22 2024-12-16T21:00:32,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=21 2024-12-16T21:00:32,324 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:32,325 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2837): Flushing 45a6ef929e2b00d4f529d744a8eceb56 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-16T21:00:32,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 
2024-12-16T21:00:32,327 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 3c10ba856da399deffd7d994922192f1 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-16T21:00:32,409 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/.tmp/cf/ff6d166c49484871a1a06abd815fc1ed is 71, key is 14d39ae40ef19a65ca20424e6913bfc3/cf:q/1734382832075/Put/seqid=0 2024-12-16T21:00:32,409 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/.tmp/cf/8e1dbab159cc463790c3097a616b7287 is 71, key is 02096e66c98f773f97f9b652ad45242b/cf:q/1734382832077/Put/seqid=0 2024-12-16T21:00:32,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741853_1029 (size=8326) 2024-12-16T21:00:32,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741853_1029 (size=8326) 2024-12-16T21:00:32,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741853_1029 (size=8326) 2024-12-16T21:00:32,434 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/.tmp/cf/ff6d166c49484871a1a06abd815fc1ed 2024-12-16T21:00:32,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T21:00:32,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741852_1028 (size=5288) 2024-12-16T21:00:32,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741852_1028 (size=5288) 2024-12-16T21:00:32,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741852_1028 (size=5288) 2024-12-16T21:00:32,459 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/.tmp/cf/8e1dbab159cc463790c3097a616b7287 2024-12-16T21:00:32,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/.tmp/cf/ff6d166c49484871a1a06abd815fc1ed as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/cf/ff6d166c49484871a1a06abd815fc1ed 2024-12-16T21:00:32,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/.tmp/cf/8e1dbab159cc463790c3097a616b7287 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/cf/8e1dbab159cc463790c3097a616b7287 2024-12-16T21:00:32,520 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/cf/ff6d166c49484871a1a06abd815fc1ed, entries=47, sequenceid=6, filesize=8.1 K 2024-12-16T21:00:32,521 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/cf/8e1dbab159cc463790c3097a616b7287, entries=3, sequenceid=6, filesize=5.2 K 2024-12-16T21:00:32,524 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 3c10ba856da399deffd7d994922192f1 in 197ms, sequenceid=6, compaction requested=false 2024-12-16T21:00:32,524 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 45a6ef929e2b00d4f529d744a8eceb56 in 199ms, sequenceid=6, compaction requested=false 2024-12-16T21:00:32,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-16T21:00:32,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-16T21:00:32,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2538): Flush status journal for 45a6ef929e2b00d4f529d744a8eceb56: 2024-12-16T21:00:32,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 3c10ba856da399deffd7d994922192f1: 2024-12-16T21:00:32,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(75): Snapshotting 
region testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. for snaptb0-testExportWithTargetName completed. 2024-12-16T21:00:32,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. for snaptb0-testExportWithTargetName completed. 2024-12-16T21:00:32,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-16T21:00:32,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-16T21:00:32,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:00:32,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:00:32,527 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/cf/8e1dbab159cc463790c3097a616b7287] hfiles 2024-12-16T21:00:32,527 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/cf/ff6d166c49484871a1a06abd815fc1ed] hfiles 2024-12-16T21:00:32,527 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/cf/8e1dbab159cc463790c3097a616b7287 for snapshot=snaptb0-testExportWithTargetName 2024-12-16T21:00:32,527 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/cf/ff6d166c49484871a1a06abd815fc1ed for snapshot=snaptb0-testExportWithTargetName 2024-12-16T21:00:32,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741854_1030 (size=109) 2024-12-16T21:00:32,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741854_1030 (size=109) 2024-12-16T21:00:32,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:32,585 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-16T21:00:32,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741854_1030 (size=109) 2024-12-16T21:00:32,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-16T21:00:32,586 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:32,586 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:32,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; SnapshotRegionProcedure 3c10ba856da399deffd7d994922192f1 in 420 msec 2024-12-16T21:00:32,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741855_1031 (size=109) 2024-12-16T21:00:32,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741855_1031 (size=109) 2024-12-16T21:00:32,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741855_1031 (size=109) 2024-12-16T21:00:32,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 
2024-12-16T21:00:32,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=22 2024-12-16T21:00:32,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=22 2024-12-16T21:00:32,608 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:32,609 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:32,614 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=22, resume processing ppid=20 2024-12-16T21:00:32,614 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, ppid=20, state=SUCCESS; SnapshotRegionProcedure 45a6ef929e2b00d4f529d744a8eceb56 in 441 msec 2024-12-16T21:00:32,614 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:00:32,615 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:00:32,616 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:00:32,616 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-16T21:00:32,618 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-16T21:00:32,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741856_1032 (size=627) 2024-12-16T21:00:32,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741856_1032 (size=627) 2024-12-16T21:00:32,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741856_1032 (size=627) 2024-12-16T21:00:32,687 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:00:32,705 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:00:32,708 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-16T21:00:32,710 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:00:32,710 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-16T21:00:32,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 568 msec 2024-12-16T21:00:32,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T21:00:32,751 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 20 completed 2024-12-16T21:00:32,752 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382832752 2024-12-16T21:00:32,752 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37263, tgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382832752, rawTgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382832752, srcFsUri=hdfs://localhost:37263, srcDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:32,801 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37263, inputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:32,801 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382832752, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382832752/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-16T21:00:32,806 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
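From here the test hands off to the ExportSnapshot tool, whose setup (inputFs/inputRoot, outputRoot, initialOutputSnapshotDir) is logged above. A rough sketch of driving the same tool programmatically, assuming the documented -snapshot and -copy-to options plus the -target option this test appears to use to rename the snapshot at the destination; the destination path is copied from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExportSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testExportWithTargetName -copy-to <dest> -target testExportWithTargetName
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithTargetName",
            "-copy-to",
            "hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382832752",
            "-target", "testExportWithTargetName"
        });
        System.exit(rc);
      }
    }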
2024-12-16T21:00:32,826 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382832752/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-16T21:00:32,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741857_1033 (size=162) 2024-12-16T21:00:32,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741857_1033 (size=162) 2024-12-16T21:00:32,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741857_1033 (size=162) 2024-12-16T21:00:32,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741858_1034 (size=627) 2024-12-16T21:00:32,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741858_1034 (size=627) 2024-12-16T21:00:32,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741858_1034 (size=627) 2024-12-16T21:00:32,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741859_1035 (size=154) 2024-12-16T21:00:32,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741859_1035 (size=154) 2024-12-16T21:00:32,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741859_1035 (size=154) 2024-12-16T21:00:32,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:32,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:32,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:32,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:34,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-13215197722683595667.jar 2024-12-16T21:00:34,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:34,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:34,239 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-2133369631245140182.jar 2024-12-16T21:00:34,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:34,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:34,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:34,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:34,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:34,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:34,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T21:00:34,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T21:00:34,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T21:00:34,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T21:00:34,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T21:00:34,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T21:00:34,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T21:00:34,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T21:00:34,295 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T21:00:34,296 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T21:00:34,297 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T21:00:34,297 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T21:00:34,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:00:34,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:00:34,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:00:34,302 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:00:34,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:00:34,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:00:34,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:00:34,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741860_1036 (size=127628) 2024-12-16T21:00:34,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741860_1036 (size=127628) 2024-12-16T21:00:34,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741860_1036 (size=127628) 2024-12-16T21:00:34,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741861_1037 (size=2172137) 2024-12-16T21:00:34,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741861_1037 (size=2172137) 2024-12-16T21:00:34,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741861_1037 (size=2172137) 2024-12-16T21:00:34,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741862_1038 (size=213228) 2024-12-16T21:00:34,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741862_1038 (size=213228) 2024-12-16T21:00:34,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741862_1038 (size=213228) 2024-12-16T21:00:34,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741863_1039 (size=1877034) 2024-12-16T21:00:34,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741863_1039 (size=1877034) 2024-12-16T21:00:34,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741863_1039 (size=1877034) 2024-12-16T21:00:34,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741864_1040 (size=533455) 
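[Reference sketch, not part of the captured output] The addStoredBlock records here (and continuing below) show each uploaded dependency jar being stored on all three datanodes (127.0.0.1:33147, :39445, :44571), i.e. the default HDFS replication factor of 3. Purely as an illustration, a small sketch using the plain Hadoop FileSystem API to inspect a file's replication; the fs.defaultFS value and the path are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CheckReplicationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:37263"); // placeholder NameNode URI
    try (FileSystem fs = FileSystem.get(conf)) {
      FileStatus st = fs.getFileStatus(new Path("/user/jenkins/example.jar")); // placeholder path
      System.out.println(st.getPath() + " replication=" + st.getReplication()
          + " length=" + st.getLen());
    }
  }
}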
2024-12-16T21:00:34,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741864_1040 (size=533455) 2024-12-16T21:00:34,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741864_1040 (size=533455) 2024-12-16T21:00:34,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741865_1041 (size=7280644) 2024-12-16T21:00:34,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741865_1041 (size=7280644) 2024-12-16T21:00:34,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741865_1041 (size=7280644) 2024-12-16T21:00:34,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741866_1042 (size=4188619) 2024-12-16T21:00:34,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741866_1042 (size=4188619) 2024-12-16T21:00:34,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741866_1042 (size=4188619) 2024-12-16T21:00:34,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741867_1043 (size=20406) 2024-12-16T21:00:34,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741867_1043 (size=20406) 2024-12-16T21:00:34,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741867_1043 (size=20406) 2024-12-16T21:00:34,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741868_1044 (size=75495) 2024-12-16T21:00:34,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741868_1044 (size=75495) 2024-12-16T21:00:34,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741868_1044 (size=75495) 2024-12-16T21:00:34,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741869_1045 (size=45609) 2024-12-16T21:00:34,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741869_1045 (size=45609) 2024-12-16T21:00:34,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741869_1045 (size=45609) 2024-12-16T21:00:34,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741870_1046 (size=110084) 2024-12-16T21:00:34,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741870_1046 (size=110084) 2024-12-16T21:00:34,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741870_1046 
(size=110084) 2024-12-16T21:00:34,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741871_1047 (size=1323991) 2024-12-16T21:00:34,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741871_1047 (size=1323991) 2024-12-16T21:00:34,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741871_1047 (size=1323991) 2024-12-16T21:00:34,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741872_1048 (size=23076) 2024-12-16T21:00:34,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741872_1048 (size=23076) 2024-12-16T21:00:34,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741872_1048 (size=23076) 2024-12-16T21:00:34,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741873_1049 (size=451756) 2024-12-16T21:00:34,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741873_1049 (size=451756) 2024-12-16T21:00:34,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741873_1049 (size=451756) 2024-12-16T21:00:34,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741874_1050 (size=126803) 2024-12-16T21:00:34,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741874_1050 (size=126803) 2024-12-16T21:00:34,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741874_1050 (size=126803) 2024-12-16T21:00:34,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741875_1051 (size=322274) 2024-12-16T21:00:34,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741875_1051 (size=322274) 2024-12-16T21:00:34,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741875_1051 (size=322274) 2024-12-16T21:00:34,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741876_1052 (size=1832290) 2024-12-16T21:00:34,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741876_1052 (size=1832290) 2024-12-16T21:00:34,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741876_1052 (size=1832290) 2024-12-16T21:00:35,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741877_1053 (size=30081) 2024-12-16T21:00:35,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to 
blk_1073741877_1053 (size=30081) 2024-12-16T21:00:35,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741877_1053 (size=30081) 2024-12-16T21:00:35,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741878_1054 (size=53616) 2024-12-16T21:00:35,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741878_1054 (size=53616) 2024-12-16T21:00:35,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741878_1054 (size=53616) 2024-12-16T21:00:35,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741879_1055 (size=29229) 2024-12-16T21:00:35,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741879_1055 (size=29229) 2024-12-16T21:00:35,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741879_1055 (size=29229) 2024-12-16T21:00:35,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741880_1056 (size=169089) 2024-12-16T21:00:35,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741880_1056 (size=169089) 2024-12-16T21:00:35,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741880_1056 (size=169089) 2024-12-16T21:00:35,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741881_1057 (size=6350920) 2024-12-16T21:00:35,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741881_1057 (size=6350920) 2024-12-16T21:00:35,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741881_1057 (size=6350920) 2024-12-16T21:00:35,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741882_1058 (size=5175431) 2024-12-16T21:00:35,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741882_1058 (size=5175431) 2024-12-16T21:00:35,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741882_1058 (size=5175431) 2024-12-16T21:00:35,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741883_1059 (size=136454) 2024-12-16T21:00:35,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741883_1059 (size=136454) 2024-12-16T21:00:35,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741883_1059 (size=136454) 2024-12-16T21:00:35,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added 
to blk_1073741884_1060 (size=907465) 2024-12-16T21:00:35,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741884_1060 (size=907465) 2024-12-16T21:00:35,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741884_1060 (size=907465) 2024-12-16T21:00:35,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741885_1061 (size=3317408) 2024-12-16T21:00:35,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741885_1061 (size=3317408) 2024-12-16T21:00:35,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741885_1061 (size=3317408) 2024-12-16T21:00:35,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741886_1062 (size=503880) 2024-12-16T21:00:35,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741886_1062 (size=503880) 2024-12-16T21:00:35,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741886_1062 (size=503880) 2024-12-16T21:00:35,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741887_1063 (size=4695811) 2024-12-16T21:00:35,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741887_1063 (size=4695811) 2024-12-16T21:00:35,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741887_1063 (size=4695811) 2024-12-16T21:00:35,513 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
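[Reference sketch, not part of the captured output] The TableMapReduceUtil(923) DEBUG lines earlier in this run record the job client resolving, for each required class, the jar that will be shipped with the MapReduce job, and the JobResourceUploader warning above notes that no job jar was set for user classes. A hedged sketch of how this is usually wired up in client code follows; the job name and driver class are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "example-hbase-mr-job"); // placeholder job name
    // Ship the HBase client/server dependencies with the job; resolving them is what
    // emits the "For class X, using jar Y" DEBUG lines seen above.
    TableMapReduceUtil.addDependencyJars(job);
    // Setting the job jar avoids the "No job jar file set" warning for user classes.
    job.setJarByClass(JobJarSetupSketch.class);
  }
}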
2024-12-16T21:00:35,523 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-16T21:00:35,532 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-16T21:00:35,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741888_1064 (size=342) 2024-12-16T21:00:35,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741888_1064 (size=342) 2024-12-16T21:00:35,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741888_1064 (size=342) 2024-12-16T21:00:35,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741889_1065 (size=15) 2024-12-16T21:00:35,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741889_1065 (size=15) 2024-12-16T21:00:35,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741889_1065 (size=15) 2024-12-16T21:00:35,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741890_1066 (size=304888) 2024-12-16T21:00:35,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741890_1066 (size=304888) 2024-12-16T21:00:35,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741890_1066 (size=304888) 2024-12-16T21:00:36,263 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:00:36,264 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T21:00:36,291 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:00:36,893 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0001_000001 (auth:SIMPLE) from 127.0.0.1:58360 2024-12-16T21:00:44,430 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0001_000001 (auth:SIMPLE) from 127.0.0.1:52574 2024-12-16T21:00:44,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741891_1067 (size=350562) 2024-12-16T21:00:44,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741891_1067 (size=350562) 2024-12-16T21:00:44,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741891_1067 (size=350562) 2024-12-16T21:00:46,715 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0001_000001 (auth:SIMPLE) from 127.0.0.1:34614 2024-12-16T21:00:49,281 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T21:00:50,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741892_1068 (size=8326) 2024-12-16T21:00:50,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741892_1068 (size=8326) 2024-12-16T21:00:50,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741892_1068 (size=8326) 2024-12-16T21:00:50,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741893_1069 (size=5288) 2024-12-16T21:00:50,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741893_1069 (size=5288) 2024-12-16T21:00:50,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741893_1069 (size=5288) 2024-12-16T21:00:50,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741894_1070 (size=17419) 2024-12-16T21:00:50,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741894_1070 (size=17419) 2024-12-16T21:00:50,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741894_1070 (size=17419) 2024-12-16T21:00:50,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741895_1071 (size=464) 2024-12-16T21:00:50,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741895_1071 (size=464) 2024-12-16T21:00:50,544 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741895_1071 (size=464) 2024-12-16T21:00:50,574 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_0/usercache/jenkins/appcache/application_1734382828217_0001/container_1734382828217_0001_01_000002/launch_container.sh] 2024-12-16T21:00:50,574 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_0/usercache/jenkins/appcache/application_1734382828217_0001/container_1734382828217_0001_01_000002/container_tokens] 2024-12-16T21:00:50,574 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_0/usercache/jenkins/appcache/application_1734382828217_0001/container_1734382828217_0001_01_000002/sysfs] 2024-12-16T21:00:50,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741896_1072 (size=17419) 2024-12-16T21:00:50,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741896_1072 (size=17419) 2024-12-16T21:00:50,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741896_1072 (size=17419) 2024-12-16T21:00:50,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741897_1073 (size=350562) 2024-12-16T21:00:50,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741897_1073 (size=350562) 2024-12-16T21:00:50,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741897_1073 (size=350562) 2024-12-16T21:00:52,625 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T21:00:52,626 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
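[Reference sketch, not part of the captured output] At this point the export MapReduce job has written its output and ExportSnapshot is finalizing and re-verifying the copied snapshot (the "Export Completed" record follows below). A sketch of how the same tool is commonly driven from Java via ToolRunner, assuming the tool's standard flags; the destination URI is a placeholder, while the -snapshot and -target values mirror the names in this log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Roughly equivalent to:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportWithTargetName \
    //     -copy-to hdfs://namenode:8020/backups \
    //     -target testExportWithTargetName
    int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
        new String[] {
            "-snapshot", "snaptb0-testExportWithTargetName",
            "-copy-to", "hdfs://namenode:8020/backups", // placeholder destination root
            "-target", "testExportWithTargetName"       // name to use on the destination
        });
    System.exit(rc);
  }
}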
2024-12-16T21:00:52,633 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: testExportWithTargetName 2024-12-16T21:00:52,634 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T21:00:52,634 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T21:00:52,634 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-16T21:00:52,635 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-16T21:00:52,635 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-16T21:00:52,635 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382832752/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382832752/.hbase-snapshot/testExportWithTargetName 2024-12-16T21:00:52,636 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382832752/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-16T21:00:52,636 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382832752/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-16T21:00:52,645 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithTargetName 2024-12-16T21:00:52,649 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-16T21:00:52,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=23, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-16T21:00:52,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-16T21:00:52,658 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382852658"}]},"ts":"1734382852658"} 2024-12-16T21:00:52,660 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-16T21:00:52,663 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithTargetName to state=DISABLING 2024-12-16T21:00:52,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-16T21:00:52,671 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3c10ba856da399deffd7d994922192f1, UNASSIGN}, {pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=45a6ef929e2b00d4f529d744a8eceb56, UNASSIGN}] 2024-12-16T21:00:52,672 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=45a6ef929e2b00d4f529d744a8eceb56, UNASSIGN 2024-12-16T21:00:52,672 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3c10ba856da399deffd7d994922192f1, UNASSIGN 2024-12-16T21:00:52,673 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=45a6ef929e2b00d4f529d744a8eceb56, regionState=CLOSING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:00:52,673 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=3c10ba856da399deffd7d994922192f1, regionState=CLOSING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:00:52,675 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:00:52,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; CloseRegionProcedure 45a6ef929e2b00d4f529d744a8eceb56, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:00:52,676 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:00:52,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=25, state=RUNNABLE; CloseRegionProcedure 3c10ba856da399deffd7d994922192f1, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:00:52,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-16T21:00:52,831 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:00:52,831 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:00:52,833 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(124): Close 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:52,833 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(124): Close 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:52,833 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(138): 
Unassign region: split region: false: evictCache: false 2024-12-16T21:00:52,833 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:00:52,834 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1681): Closing 3c10ba856da399deffd7d994922192f1, disabling compactions & flushes 2024-12-16T21:00:52,834 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1681): Closing 45a6ef929e2b00d4f529d744a8eceb56, disabling compactions & flushes 2024-12-16T21:00:52,834 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:52,834 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:52,834 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:52,834 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:52,834 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. after waiting 0 ms 2024-12-16T21:00:52,834 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. after waiting 0 ms 2024-12-16T21:00:52,834 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:52,834 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 
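[Reference sketch, not part of the captured output] The records above show DisableTableProcedure (pid=23) fanning out into CloseTableRegionsProcedure and per-region close handlers on the two region servers. A minimal sketch of issuing that disable through the Admin API, using the table name from this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (!admin.isTableDisabled(table)) {
        admin.disableTable(table); // returns once the disable procedure has completed
      }
    }
  }
}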
2024-12-16T21:00:52,841 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:00:52,843 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:00:52,845 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:00:52,845 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:00:52,845 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1. 2024-12-16T21:00:52,845 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56. 2024-12-16T21:00:52,845 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1635): Region close journal for 3c10ba856da399deffd7d994922192f1: 2024-12-16T21:00:52,845 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1635): Region close journal for 45a6ef929e2b00d4f529d744a8eceb56: 2024-12-16T21:00:52,847 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(170): Closed 3c10ba856da399deffd7d994922192f1 2024-12-16T21:00:52,848 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=3c10ba856da399deffd7d994922192f1, regionState=CLOSED 2024-12-16T21:00:52,848 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(170): Closed 45a6ef929e2b00d4f529d744a8eceb56 2024-12-16T21:00:52,849 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=45a6ef929e2b00d4f529d744a8eceb56, regionState=CLOSED 2024-12-16T21:00:52,853 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=25 2024-12-16T21:00:52,853 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=25, state=SUCCESS; CloseRegionProcedure 3c10ba856da399deffd7d994922192f1, server=6b259cf15929,36861,1734382821216 in 174 msec 2024-12-16T21:00:52,854 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-16T21:00:52,854 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; CloseRegionProcedure 45a6ef929e2b00d4f529d744a8eceb56, server=6b259cf15929,41713,1734382821274 in 177 msec 2024-12-16T21:00:52,855 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=3c10ba856da399deffd7d994922192f1, UNASSIGN in 183 msec 2024-12-16T21:00:52,857 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=26, resume processing ppid=24 2024-12-16T21:00:52,857 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=45a6ef929e2b00d4f529d744a8eceb56, UNASSIGN in 183 msec 2024-12-16T21:00:52,861 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=24, resume processing ppid=23 2024-12-16T21:00:52,861 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, ppid=23, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 193 msec 2024-12-16T21:00:52,863 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382852863"}]},"ts":"1734382852863"} 2024-12-16T21:00:52,865 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-16T21:00:52,866 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T21:00:52,867 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-16T21:00:52,868 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41832, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T21:00:52,872 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithTargetName in 217 msec 2024-12-16T21:00:52,872 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T21:00:52,873 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41834, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T21:00:52,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-16T21:00:52,962 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName, procId: 23 completed 2024-12-16T21:00:52,966 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-16T21:00:52,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-16T21:00:52,973 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-16T21:00:52,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(259): 
Removing permissions of removed table testtb-testExportWithTargetName
2024-12-16T21:00:52,976 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=29, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-16T21:00:52,978 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName
2024-12-16T21:00:52,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T21:00:52,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T21:00:52,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T21:00:52,983 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF
2024-12-16T21:00:52,983 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF
2024-12-16T21:00:52,983 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF
2024-12-16T21:00:52,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T21:00:52,985 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1
2024-12-16T21:00:52,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T21:00:52,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-16T21:00:52,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T21:00:52,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-16T21:00:52,986 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null
2024-12-16T21:00:52,986 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty
2024-12-16T21:00:52,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-16T21:00:52,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T21:00:52,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-16T21:00:52,987 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56
2024-12-16T21:00:52,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29
2024-12-16T21:00:52,989 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/recovered.edits]
2024-12-16T21:00:52,989 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/recovered.edits]
2024-12-16T21:00:52,997 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/cf/8e1dbab159cc463790c3097a616b7287 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/cf/8e1dbab159cc463790c3097a616b7287
2024-12-16T21:00:52,997 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/cf/ff6d166c49484871a1a06abd815fc1ed to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/cf/ff6d166c49484871a1a06abd815fc1ed
2024-12-16T21:00:53,001 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56/recovered.edits/9.seqid
2024-12-16T21:00:53,001 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1/recovered.edits/9.seqid
2024-12-16T21:00:53,002 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/45a6ef929e2b00d4f529d744a8eceb56
2024-12-16T21:00:53,002 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithTargetName/3c10ba856da399deffd7d994922192f1
2024-12-16T21:00:53,002 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions
2024-12-16T21:00:53,005 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=29, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-16T21:00:53,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41713 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms
2024-12-16T21:00:53,017 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta
2024-12-16T21:00:53,021 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithTargetName' descriptor.
2024-12-16T21:00:53,022 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=29, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-16T21:00:53,022 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithTargetName' from region states.
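For orientation only: the entries above and below record a master-side DeleteTableProcedure (pid=29) for testtb-testExportWithTargetName (region directories archived, rows removed from hbase:meta, descriptor dropped), followed by deletion of the test's two snapshots. A minimal, hypothetical client-side sketch of the public HBase Admin API calls that would produce this kind of sequence is shown here; the class name, connection setup, and error handling are assumptions and this is not the test's actual code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical cleanup helper mirroring the DELETE operation and snapshot removals seen in this log.
public class DropExportTestTable {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath pointing at the test cluster.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // regions must be offline before deletion
        }
        admin.deleteTable(table);      // master runs a DeleteTableProcedure like pid=29 above
      }
      // Snapshot cleanup, matching the SnapshotManager "Deleting snapshot" entries below.
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}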
2024-12-16T21:00:53,023 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382853022"}]},"ts":"9223372036854775807"}
2024-12-16T21:00:53,023 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382853022"}]},"ts":"9223372036854775807"}
2024-12-16T21:00:53,025 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-16T21:00:53,025 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3c10ba856da399deffd7d994922192f1, NAME => 'testtb-testExportWithTargetName,,1734382830223.3c10ba856da399deffd7d994922192f1.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 45a6ef929e2b00d4f529d744a8eceb56, NAME => 'testtb-testExportWithTargetName,1,1734382830223.45a6ef929e2b00d4f529d744a8eceb56.', STARTKEY => '1', ENDKEY => ''}]
2024-12-16T21:00:53,026 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithTargetName' as deleted.
2024-12-16T21:00:53,026 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734382853026"}]},"ts":"9223372036854775807"}
2024-12-16T21:00:53,028 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithTargetName state from META
2024-12-16T21:00:53,031 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=29, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-16T21:00:53,032 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithTargetName in 64 msec
2024-12-16T21:00:53,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29
2024-12-16T21:00:53,089 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName, procId: 29 completed
2024-12-16T21:00:53,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName"
2024-12-16T21:00:53,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithTargetName
2024-12-16T21:00:53,110 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName"
2024-12-16T21:00:53,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithTargetName
2024-12-16T21:00:53,146 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=774 (was 723) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:43349 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method)
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-832448423_1 at /127.0.0.1:37438 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:43801 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:37076 [Waiting for operation #4] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/6b259cf15929:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:37460 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43349 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38455 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 22129) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1298 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43801 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-832448423_1 at /127.0.0.1:39242 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:39280 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/6b259cf15929:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 783) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=216 (was 152) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=4736 (was 6269) 2024-12-16T21:00:53,146 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=774 is superior to 500 2024-12-16T21:00:53,166 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=774, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=216, ProcessCount=17, AvailableMemoryMB=4735 2024-12-16T21:00:53,167 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=774 is superior to 500 2024-12-16T21:00:53,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:00:53,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T21:00:53,171 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:00:53,171 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:53,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 30 2024-12-16T21:00:53,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-16T21:00:53,172 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:00:53,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741898_1074 (size=404) 2024-12-16T21:00:53,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741898_1074 (size=404) 2024-12-16T21:00:53,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741898_1074 (size=404) 2024-12-16T21:00:53,183 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4692eddc5cc4a103c5b809ba757d08ab, NAME => 'testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:53,184 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 700f181be5e8b4d3e492309ab68f9d3d, NAME => 'testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:53,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741899_1075 (size=65) 2024-12-16T21:00:53,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741899_1075 (size=65) 2024-12-16T21:00:53,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741899_1075 (size=65) 2024-12-16T21:00:53,193 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:53,193 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 4692eddc5cc4a103c5b809ba757d08ab, disabling compactions & flushes 2024-12-16T21:00:53,193 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 2024-12-16T21:00:53,193 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 2024-12-16T21:00:53,193 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. after waiting 0 ms 2024-12-16T21:00:53,193 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 2024-12-16T21:00:53,193 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 
2024-12-16T21:00:53,193 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4692eddc5cc4a103c5b809ba757d08ab: 2024-12-16T21:00:53,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741900_1076 (size=65) 2024-12-16T21:00:53,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741900_1076 (size=65) 2024-12-16T21:00:53,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741900_1076 (size=65) 2024-12-16T21:00:53,198 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:53,198 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 700f181be5e8b4d3e492309ab68f9d3d, disabling compactions & flushes 2024-12-16T21:00:53,198 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 2024-12-16T21:00:53,198 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 2024-12-16T21:00:53,198 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. after waiting 0 ms 2024-12-16T21:00:53,198 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 2024-12-16T21:00:53,198 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 
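For context, the table descriptor logged above (a single 'cf' family with VERSIONS '1', BLOOMFILTER 'ROW', BLOCKSIZE 65536, REGION_REPLICATION '1', and a split at key '1') corresponds roughly to the following HBase 2.x client-API call. This is only an illustrative sketch under assumed connection settings, not the test's own code; the class name is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch: create a table equivalent to the descriptor logged by CreateTableProcedure.
    public class CreateExportTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
              .setRegionReplication(1)                       // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                          // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW)          // BLOOMFILTER => 'ROW'
                  .setBlocksize(64 * 1024)                    // BLOCKSIZE => '65536'
                  .build())
              .build();
          // One split key '1' yields the two regions (STARTKEY '', ENDKEY '1') and
          // (STARTKEY '1', ENDKEY '') created in the HRegion(7106) entries above.
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }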
2024-12-16T21:00:53,199 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 700f181be5e8b4d3e492309ab68f9d3d: 2024-12-16T21:00:53,200 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:00:53,200 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734382853200"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382853200"}]},"ts":"1734382853200"} 2024-12-16T21:00:53,200 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734382853200"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382853200"}]},"ts":"1734382853200"} 2024-12-16T21:00:53,203 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T21:00:53,204 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:00:53,204 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382853204"}]},"ts":"1734382853204"} 2024-12-16T21:00:53,206 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-16T21:00:53,210 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:00:53,211 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:00:53,211 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:00:53,211 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:00:53,211 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:00:53,211 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:00:53,211 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:00:53,211 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:00:53,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4692eddc5cc4a103c5b809ba757d08ab, ASSIGN}, {pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=700f181be5e8b4d3e492309ab68f9d3d, ASSIGN}] 2024-12-16T21:00:53,213 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, 
region=700f181be5e8b4d3e492309ab68f9d3d, ASSIGN 2024-12-16T21:00:53,213 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4692eddc5cc4a103c5b809ba757d08ab, ASSIGN 2024-12-16T21:00:53,214 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=700f181be5e8b4d3e492309ab68f9d3d, ASSIGN; state=OFFLINE, location=6b259cf15929,36861,1734382821216; forceNewPlan=false, retain=false 2024-12-16T21:00:53,214 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4692eddc5cc4a103c5b809ba757d08ab, ASSIGN; state=OFFLINE, location=6b259cf15929,41713,1734382821274; forceNewPlan=false, retain=false 2024-12-16T21:00:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-16T21:00:53,364 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T21:00:53,365 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=4692eddc5cc4a103c5b809ba757d08ab, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:00:53,365 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=700f181be5e8b4d3e492309ab68f9d3d, regionState=OPENING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:00:53,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=31, state=RUNNABLE; OpenRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:00:53,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=32, state=RUNNABLE; OpenRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:00:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-16T21:00:53,519 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:00:53,520 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:00:53,523 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 2024-12-16T21:00:53,523 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 
2024-12-16T21:00:53,524 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => 4692eddc5cc4a103c5b809ba757d08ab, NAME => 'testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T21:00:53,524 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7285): Opening region: {ENCODED => 700f181be5e8b4d3e492309ab68f9d3d, NAME => 'testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T21:00:53,524 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. service=AccessControlService 2024-12-16T21:00:53,524 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. service=AccessControlService 2024-12-16T21:00:53,524 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:00:53,524 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:00:53,524 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:53,524 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:53,524 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:53,525 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:53,525 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7327): checking encryption for 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:53,525 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:53,525 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7330): checking 
classloading for 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:53,525 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:53,526 INFO [StoreOpener-700f181be5e8b4d3e492309ab68f9d3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:53,526 INFO [StoreOpener-4692eddc5cc4a103c5b809ba757d08ab-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:53,528 INFO [StoreOpener-700f181be5e8b4d3e492309ab68f9d3d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 700f181be5e8b4d3e492309ab68f9d3d columnFamilyName cf 2024-12-16T21:00:53,528 DEBUG [StoreOpener-700f181be5e8b4d3e492309ab68f9d3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:53,529 INFO [StoreOpener-4692eddc5cc4a103c5b809ba757d08ab-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4692eddc5cc4a103c5b809ba757d08ab columnFamilyName cf 2024-12-16T21:00:53,529 INFO [StoreOpener-700f181be5e8b4d3e492309ab68f9d3d-1 {}] regionserver.HStore(327): Store=700f181be5e8b4d3e492309ab68f9d3d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:00:53,529 DEBUG [StoreOpener-4692eddc5cc4a103c5b809ba757d08ab-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:53,529 INFO [StoreOpener-4692eddc5cc4a103c5b809ba757d08ab-1 {}] regionserver.HStore(327): Store=4692eddc5cc4a103c5b809ba757d08ab/cf, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:00:53,530 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:53,530 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:53,530 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:53,531 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:53,533 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1085): writing seq id for 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:53,533 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:53,535 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:00:53,535 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:00:53,536 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened 4692eddc5cc4a103c5b809ba757d08ab; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61054675, jitterRate=-0.09021444618701935}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:00:53,536 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1102): Opened 700f181be5e8b4d3e492309ab68f9d3d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74648359, jitterRate=0.11234723031520844}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:00:53,538 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] 
regionserver.HRegion(1001): Region open journal for 700f181be5e8b4d3e492309ab68f9d3d: 2024-12-16T21:00:53,538 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for 4692eddc5cc4a103c5b809ba757d08ab: 2024-12-16T21:00:53,539 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d., pid=34, masterSystemTime=1734382853520 2024-12-16T21:00:53,539 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab., pid=33, masterSystemTime=1734382853519 2024-12-16T21:00:53,541 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 2024-12-16T21:00:53,541 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 2024-12-16T21:00:53,541 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=700f181be5e8b4d3e492309ab68f9d3d, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:00:53,541 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 2024-12-16T21:00:53,541 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 
2024-12-16T21:00:53,542 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=4692eddc5cc4a103c5b809ba757d08ab, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:00:53,553 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=32 2024-12-16T21:00:53,554 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=32, state=SUCCESS; OpenRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d, server=6b259cf15929,36861,1734382821216 in 175 msec 2024-12-16T21:00:53,554 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=31 2024-12-16T21:00:53,555 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=31, state=SUCCESS; OpenRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab, server=6b259cf15929,41713,1734382821274 in 179 msec 2024-12-16T21:00:53,555 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=700f181be5e8b4d3e492309ab68f9d3d, ASSIGN in 342 msec 2024-12-16T21:00:53,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-16T21:00:53,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4692eddc5cc4a103c5b809ba757d08ab, ASSIGN in 343 msec 2024-12-16T21:00:53,557 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:00:53,558 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382853557"}]},"ts":"1734382853557"} 2024-12-16T21:00:53,559 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-16T21:00:53,563 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:00:53,563 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-16T21:00:53,565 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-16T21:00:53,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:53,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:53,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:53,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:53,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:53,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:53,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:53,571 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:53,572 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithResetTtl in 400 msec 2024-12-16T21:00:53,681 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-16T21:00:53,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-16T21:00:53,777 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl, procId: 30 completed 2024-12-16T21:00:53,777 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-16T21:00:53,777 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:00:53,781 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-16T21:00:53,782 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:00:53,782 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithResetTtl assigned. 2024-12-16T21:00:53,785 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-16T21:00:53,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382853785 (current time:1734382853785). 
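For context, the FLUSH-type snapshot requested above ({ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }) could be issued through the Admin API roughly as sketched below; connection setup is assumed and this is not the test's own code. No explicit TTL is passed, which matches the "resetting it to default value: 0" entry that follows.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    // Illustrative sketch: request a FLUSH snapshot like the one logged by MasterRpcServices(1703).
    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.snapshot(new SnapshotDescription(
              "emptySnaptb0-testExportWithResetTtl",
              TableName.valueOf("testtb-testExportWithResetTtl"),
              SnapshotType.FLUSH));
        }
      }
    }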
2024-12-16T21:00:53,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:00:53,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-16T21:00:53,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:00:53,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0819436b to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4f122bf0 2024-12-16T21:00:53,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fbcdfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:53,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:53,793 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54246, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:53,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0819436b to 127.0.0.1:61742 2024-12-16T21:00:53,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:00:53,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x50368a77 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e25662a 2024-12-16T21:00:53,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@526dee09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:53,932 DEBUG [hconnection-0x74b74154-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:53,933 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:53,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:53,936 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39736, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:53,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x50368a77 to 127.0.0.1:61742 2024-12-16T21:00:53,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:00:53,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-16T21:00:53,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:00:53,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-16T21:00:53,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-16T21:00:53,941 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:00:53,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-16T21:00:53,942 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:00:53,947 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:00:53,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741901_1077 (size=161) 2024-12-16T21:00:53,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741901_1077 (size=161) 2024-12-16T21:00:53,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741901_1077 (size=161) 2024-12-16T21:00:54,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-16T21:00:54,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-16T21:00:54,369 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:00:54,370 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab}, {pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d}] 2024-12-16T21:00:54,371 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:54,371 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:54,522 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:00:54,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=37 2024-12-16T21:00:54,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 2024-12-16T21:00:54,525 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:00:54,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for 700f181be5e8b4d3e492309ab68f9d3d: 2024-12-16T21:00:54,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-16T21:00:54,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-16T21:00:54,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:00:54,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=36 2024-12-16T21:00:54,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:00:54,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 
2024-12-16T21:00:54,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.HRegion(2538): Flush status journal for 4692eddc5cc4a103c5b809ba757d08ab: 2024-12-16T21:00:54,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-16T21:00:54,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-16T21:00:54,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:00:54,527 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:00:54,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741903_1079 (size=68) 2024-12-16T21:00:54,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741903_1079 (size=68) 2024-12-16T21:00:54,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-16T21:00:54,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741903_1079 (size=68) 2024-12-16T21:00:54,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 
2024-12-16T21:00:54,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-12-16T21:00:54,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-12-16T21:00:54,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:54,547 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:54,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741902_1078 (size=68) 2024-12-16T21:00:54,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741902_1078 (size=68) 2024-12-16T21:00:54,551 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=35, state=SUCCESS; SnapshotRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d in 180 msec 2024-12-16T21:00:54,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741902_1078 (size=68) 2024-12-16T21:00:54,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 2024-12-16T21:00:54,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=36 2024-12-16T21:00:54,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=36 2024-12-16T21:00:54,555 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:54,555 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:54,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-16T21:00:54,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; SnapshotRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab in 187 msec 2024-12-16T21:00:54,558 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:00:54,559 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:00:54,560 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:00:54,560 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-16T21:00:54,561 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-16T21:00:54,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741904_1080 (size=543) 2024-12-16T21:00:54,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741904_1080 (size=543) 2024-12-16T21:00:54,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741904_1080 (size=543) 2024-12-16T21:00:54,580 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:00:54,586 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:00:54,587 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-16T21:00:54,589 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:00:54,589 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-16T21:00:54,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 650 msec 2024-12-16T21:00:55,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-16T21:00:55,048 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 35 completed 2024-12-16T21:00:55,056 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41713 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:00:55,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:00:55,068 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-16T21:00:55,068 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 2024-12-16T21:00:55,068 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:00:55,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-16T21:00:55,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382855083 (current time:1734382855083). 2024-12-16T21:00:55,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:00:55,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-16T21:00:55,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:00:55,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08a79592 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4654d833 2024-12-16T21:00:55,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6686824e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:55,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:55,091 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54262, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:55,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08a79592 to 127.0.0.1:61742 2024-12-16T21:00:55,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:00:55,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6c45b1dc to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ec01e82 2024-12-16T21:00:55,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41842f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:55,104 DEBUG [hconnection-0xf7e0df4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:55,105 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54268, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:55,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:55,108 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39746, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:55,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6c45b1dc to 127.0.0.1:61742 2024-12-16T21:00:55,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:00:55,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-16T21:00:55,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-16T21:00:55,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-16T21:00:55,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-16T21:00:55,116 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:00:55,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-16T21:00:55,117 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:00:55,119 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:00:55,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741905_1081 (size=156) 2024-12-16T21:00:55,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741905_1081 (size=156) 2024-12-16T21:00:55,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741905_1081 (size=156) 2024-12-16T21:00:55,136 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:00:55,136 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab}, {pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d}] 2024-12-16T21:00:55,138 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:55,138 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:55,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 
2024-12-16T21:00:55,289 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:00:55,289 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:00:55,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=39 2024-12-16T21:00:55,291 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 2024-12-16T21:00:55,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=40 2024-12-16T21:00:55,291 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 2024-12-16T21:00:55,291 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2837): Flushing 4692eddc5cc4a103c5b809ba757d08ab 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-12-16T21:00:55,292 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 700f181be5e8b4d3e492309ab68f9d3d 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-12-16T21:00:55,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/.tmp/cf/2904d7234d5b417690a49480e089382d is 71, key is 0095435a8de2e8377175d5d2f4beaee9/cf:q/1734382855056/Put/seqid=0 2024-12-16T21:00:55,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/.tmp/cf/f5b8db49da254126939406a31410928b is 71, key is 163f3cb6f2997b73f49f5019455218a4/cf:q/1734382855058/Put/seqid=0 2024-12-16T21:00:55,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741907_1083 (size=8052) 2024-12-16T21:00:55,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741907_1083 (size=8052) 2024-12-16T21:00:55,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741907_1083 (size=8052) 2024-12-16T21:00:55,343 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.80 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/.tmp/cf/f5b8db49da254126939406a31410928b 2024-12-16T21:00:55,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/.tmp/cf/f5b8db49da254126939406a31410928b as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/cf/f5b8db49da254126939406a31410928b 2024-12-16T21:00:55,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741906_1082 (size=5566) 2024-12-16T21:00:55,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741906_1082 (size=5566) 2024-12-16T21:00:55,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741906_1082 (size=5566) 2024-12-16T21:00:55,375 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/cf/f5b8db49da254126939406a31410928b, entries=43, sequenceid=6, filesize=7.9 K 2024-12-16T21:00:55,375 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=467 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/.tmp/cf/2904d7234d5b417690a49480e089382d 2024-12-16T21:00:55,377 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for 700f181be5e8b4d3e492309ab68f9d3d in 84ms, sequenceid=6, compaction requested=false 2024-12-16T21:00:55,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for 700f181be5e8b4d3e492309ab68f9d3d: 2024-12-16T21:00:55,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. for snaptb0-testExportWithResetTtl completed. 2024-12-16T21:00:55,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-16T21:00:55,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:00:55,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/cf/f5b8db49da254126939406a31410928b] hfiles 2024-12-16T21:00:55,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/cf/f5b8db49da254126939406a31410928b for snapshot=snaptb0-testExportWithResetTtl 2024-12-16T21:00:55,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/.tmp/cf/2904d7234d5b417690a49480e089382d as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/cf/2904d7234d5b417690a49480e089382d 2024-12-16T21:00:55,392 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/cf/2904d7234d5b417690a49480e089382d, entries=7, sequenceid=6, filesize=5.4 K 2024-12-16T21:00:55,394 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for 4692eddc5cc4a103c5b809ba757d08ab in 103ms, sequenceid=6, compaction requested=false 2024-12-16T21:00:55,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2538): Flush status journal for 4692eddc5cc4a103c5b809ba757d08ab: 2024-12-16T21:00:55,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. for snaptb0-testExportWithResetTtl completed. 2024-12-16T21:00:55,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-16T21:00:55,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:00:55,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/cf/2904d7234d5b417690a49480e089382d] hfiles 2024-12-16T21:00:55,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/cf/2904d7234d5b417690a49480e089382d for snapshot=snaptb0-testExportWithResetTtl 2024-12-16T21:00:55,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741908_1084 (size=107) 2024-12-16T21:00:55,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741908_1084 (size=107) 2024-12-16T21:00:55,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741908_1084 (size=107) 2024-12-16T21:00:55,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 
2024-12-16T21:00:55,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-16T21:00:55,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-16T21:00:55,419 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:55,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-16T21:00:55,419 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:00:55,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; SnapshotRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d in 285 msec 2024-12-16T21:00:55,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741909_1085 (size=107) 2024-12-16T21:00:55,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741909_1085 (size=107) 2024-12-16T21:00:55,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741909_1085 (size=107) 2024-12-16T21:00:55,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 
2024-12-16T21:00:55,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=39 2024-12-16T21:00:55,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=39 2024-12-16T21:00:55,435 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:55,435 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:00:55,444 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-16T21:00:55,444 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; SnapshotRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab in 305 msec 2024-12-16T21:00:55,444 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:00:55,445 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:00:55,445 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:00:55,446 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-16T21:00:55,447 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-16T21:00:55,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741910_1086 (size=621) 2024-12-16T21:00:55,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741910_1086 (size=621) 2024-12-16T21:00:55,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741910_1086 (size=621) 2024-12-16T21:00:55,464 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:00:55,470 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:00:55,470 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-16T21:00:55,472 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:00:55,472 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-16T21:00:55,473 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 362 msec 2024-12-16T21:00:55,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-16T21:00:55,721 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 38 completed 2024-12-16T21:00:55,724 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:00:55,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportWithResetTtl 2024-12-16T21:00:55,728 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:00:55,728 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:55,728 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 41 2024-12-16T21:00:55,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T21:00:55,729 INFO [PEWorker-4 
{}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:00:55,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741911_1087 (size=397) 2024-12-16T21:00:55,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741911_1087 (size=397) 2024-12-16T21:00:55,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741911_1087 (size=397) 2024-12-16T21:00:55,760 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 53858618241cf550a0c934f05cd0540f, NAME => 'testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:55,761 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 330ebab1e871e4cea2cd8940d534008f, NAME => 'testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:55,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741913_1089 (size=58) 2024-12-16T21:00:55,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741912_1088 (size=58) 2024-12-16T21:00:55,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741912_1088 (size=58) 2024-12-16T21:00:55,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741913_1089 (size=58) 2024-12-16T21:00:55,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741913_1089 (size=58) 2024-12-16T21:00:55,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741912_1088 (size=58) 2024-12-16T21:00:55,783 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): 
Instantiated testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:55,783 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 330ebab1e871e4cea2cd8940d534008f, disabling compactions & flushes 2024-12-16T21:00:55,783 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 2024-12-16T21:00:55,783 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 2024-12-16T21:00:55,783 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. after waiting 0 ms 2024-12-16T21:00:55,783 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 2024-12-16T21:00:55,783 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 2024-12-16T21:00:55,783 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 330ebab1e871e4cea2cd8940d534008f: 2024-12-16T21:00:55,784 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:55,784 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 53858618241cf550a0c934f05cd0540f, disabling compactions & flushes 2024-12-16T21:00:55,784 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 2024-12-16T21:00:55,784 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 2024-12-16T21:00:55,784 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. after waiting 0 ms 2024-12-16T21:00:55,784 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 2024-12-16T21:00:55,785 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 
2024-12-16T21:00:55,785 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 53858618241cf550a0c934f05cd0540f: 2024-12-16T21:00:55,786 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:00:55,786 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734382855786"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382855786"}]},"ts":"1734382855786"} 2024-12-16T21:00:55,786 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734382855786"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382855786"}]},"ts":"1734382855786"} 2024-12-16T21:00:55,789 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T21:00:55,790 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:00:55,790 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382855790"}]},"ts":"1734382855790"} 2024-12-16T21:00:55,791 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-16T21:00:55,797 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:00:55,798 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:00:55,798 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:00:55,799 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:00:55,799 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:00:55,799 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:00:55,799 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:00:55,799 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:00:55,799 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=53858618241cf550a0c934f05cd0540f, ASSIGN}, {pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=330ebab1e871e4cea2cd8940d534008f, ASSIGN}] 2024-12-16T21:00:55,800 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=330ebab1e871e4cea2cd8940d534008f, ASSIGN 2024-12-16T21:00:55,800 INFO 
[PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=53858618241cf550a0c934f05cd0540f, ASSIGN 2024-12-16T21:00:55,801 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=330ebab1e871e4cea2cd8940d534008f, ASSIGN; state=OFFLINE, location=6b259cf15929,42323,1734382821139; forceNewPlan=false, retain=false 2024-12-16T21:00:55,801 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=53858618241cf550a0c934f05cd0540f, ASSIGN; state=OFFLINE, location=6b259cf15929,41713,1734382821274; forceNewPlan=false, retain=false 2024-12-16T21:00:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T21:00:55,952 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T21:00:55,952 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=53858618241cf550a0c934f05cd0540f, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:00:55,952 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=330ebab1e871e4cea2cd8940d534008f, regionState=OPENING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:00:55,954 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; OpenRegionProcedure 330ebab1e871e4cea2cd8940d534008f, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:00:55,955 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=42, state=RUNNABLE; OpenRegionProcedure 53858618241cf550a0c934f05cd0540f, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:00:56,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T21:00:56,107 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:00:56,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:00:56,111 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 2024-12-16T21:00:56,111 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => 53858618241cf550a0c934f05cd0540f, NAME => 'testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T21:00:56,111 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 
2024-12-16T21:00:56,112 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 330ebab1e871e4cea2cd8940d534008f, NAME => 'testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T21:00:56,112 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. service=AccessControlService 2024-12-16T21:00:56,112 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. service=AccessControlService 2024-12-16T21:00:56,112 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:00:56,112 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:00:56,112 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:00:56,112 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 53858618241cf550a0c934f05cd0540f 2024-12-16T21:00:56,112 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:56,112 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:00:56,113 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:00:56,113 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for 53858618241cf550a0c934f05cd0540f 2024-12-16T21:00:56,113 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:00:56,113 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for 53858618241cf550a0c934f05cd0540f 2024-12-16T21:00:56,114 INFO [StoreOpener-53858618241cf550a0c934f05cd0540f-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 53858618241cf550a0c934f05cd0540f 2024-12-16T21:00:56,114 INFO [StoreOpener-330ebab1e871e4cea2cd8940d534008f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:00:56,116 INFO [StoreOpener-330ebab1e871e4cea2cd8940d534008f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 330ebab1e871e4cea2cd8940d534008f columnFamilyName cf 2024-12-16T21:00:56,116 INFO [StoreOpener-53858618241cf550a0c934f05cd0540f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53858618241cf550a0c934f05cd0540f columnFamilyName cf 2024-12-16T21:00:56,116 DEBUG [StoreOpener-330ebab1e871e4cea2cd8940d534008f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:56,116 DEBUG [StoreOpener-53858618241cf550a0c934f05cd0540f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:00:56,117 INFO [StoreOpener-330ebab1e871e4cea2cd8940d534008f-1 {}] regionserver.HStore(327): Store=330ebab1e871e4cea2cd8940d534008f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:00:56,117 INFO [StoreOpener-53858618241cf550a0c934f05cd0540f-1 {}] regionserver.HStore(327): Store=53858618241cf550a0c934f05cd0540f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:00:56,118 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:00:56,118 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f 2024-12-16T21:00:56,118 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:00:56,118 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f 2024-12-16T21:00:56,120 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:00:56,121 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for 53858618241cf550a0c934f05cd0540f 2024-12-16T21:00:56,123 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:00:56,123 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 330ebab1e871e4cea2cd8940d534008f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66488553, jitterRate=-0.009243354201316833}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:00:56,123 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:00:56,124 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened 53858618241cf550a0c934f05cd0540f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65356737, jitterRate=-0.026108726859092712}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:00:56,124 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 330ebab1e871e4cea2cd8940d534008f: 2024-12-16T21:00:56,124 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for 53858618241cf550a0c934f05cd0540f: 2024-12-16T21:00:56,125 INFO 
[RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f., pid=44, masterSystemTime=1734382856107 2024-12-16T21:00:56,125 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f., pid=45, masterSystemTime=1734382856107 2024-12-16T21:00:56,127 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 2024-12-16T21:00:56,127 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 2024-12-16T21:00:56,128 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=53858618241cf550a0c934f05cd0540f, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:00:56,128 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 2024-12-16T21:00:56,128 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 2024-12-16T21:00:56,128 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=330ebab1e871e4cea2cd8940d534008f, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:00:56,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=42 2024-12-16T21:00:56,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=42, state=SUCCESS; OpenRegionProcedure 53858618241cf550a0c934f05cd0540f, server=6b259cf15929,41713,1734382821274 in 174 msec 2024-12-16T21:00:56,132 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-16T21:00:56,133 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; OpenRegionProcedure 330ebab1e871e4cea2cd8940d534008f, server=6b259cf15929,42323,1734382821139 in 176 msec 2024-12-16T21:00:56,133 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=53858618241cf550a0c934f05cd0540f, ASSIGN in 333 msec 2024-12-16T21:00:56,135 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=41 2024-12-16T21:00:56,135 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=330ebab1e871e4cea2cd8940d534008f, ASSIGN in 334 msec 2024-12-16T21:00:56,135 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 
2024-12-16T21:00:56,136 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382856136"}]},"ts":"1734382856136"} 2024-12-16T21:00:56,137 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-16T21:00:56,140 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:00:56,141 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-16T21:00:56,142 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-16T21:00:56,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:56,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:56,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:56,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:00:56,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:56,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:56,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:56,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:56,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:56,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:56,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:56,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:00:56,148 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=testExportWithResetTtl in 422 msec 2024-12-16T21:00:56,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T21:00:56,333 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportWithResetTtl, procId: 41 completed 2024-12-16T21:00:56,333 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-16T21:00:56,334 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:00:56,338 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-16T21:00:56,338 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:00:56,339 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportWithResetTtl assigned. 2024-12-16T21:00:56,363 DEBUG [htable-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:56,364 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41713 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:00:56,368 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39758, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:56,370 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42323 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:00:56,376 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportWithResetTtl 2024-12-16T21:00:56,376 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 
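[Editor's illustrative sketch, not part of the captured log: the "writing data to region ... with WAL disabled" warnings above come from client puts issued with durability SKIP_WAL. A minimal sketch of such a write follows; the row key, qualifier and value are placeholders, not values taken from the test.]

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
          Put put = new Put(Bytes.toBytes("row-0"));  // placeholder row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL is why the regionserver logs that data may be lost on a crash.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }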
2024-12-16T21:00:56,376 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:00:56,390 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-16T21:00:56,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382856390 (current time:1734382856390). 2024-12-16T21:00:56,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-16T21:00:56,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:00:56,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d9b6a49 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6899e19e 2024-12-16T21:00:56,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@574c4f7d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:56,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:56,397 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54280, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:56,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d9b6a49 to 127.0.0.1:61742 2024-12-16T21:00:56,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:00:56,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f849fae to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8b61ed1 2024-12-16T21:00:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d349f1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:00:56,405 DEBUG [hconnection-0x3ff2a3b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:00:56,406 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54288, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:56,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-12-16T21:00:56,408 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:00:56,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f849fae to 127.0.0.1:61742 2024-12-16T21:00:56,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:00:56,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-16T21:00:56,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:00:56,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-16T21:00:56,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-16T21:00:56,413 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:00:56,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-16T21:00:56,414 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:00:56,416 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:00:56,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741914_1090 (size=143) 2024-12-16T21:00:56,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741914_1090 (size=143) 2024-12-16T21:00:56,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741914_1090 (size=143) 2024-12-16T21:00:56,428 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 
} execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:00:56,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 53858618241cf550a0c934f05cd0540f}, {pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 330ebab1e871e4cea2cd8940d534008f}] 2024-12-16T21:00:56,430 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 53858618241cf550a0c934f05cd0540f 2024-12-16T21:00:56,430 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:00:56,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-16T21:00:56,581 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:00:56,581 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:00:56,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42323 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=48 2024-12-16T21:00:56,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=47 2024-12-16T21:00:56,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 2024-12-16T21:00:56,582 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2837): Flushing 53858618241cf550a0c934f05cd0540f 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-16T21:00:56,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 
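[Editor's illustrative sketch, not part of the captured log: the snapshot request "{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }" above, and the per-region flushes that follow, correspond to a FLUSH-type snapshot taken through the Admin API. The simple synchronous overload is shown; the ttl seen in the request is a snapshot property that newer 2.x clients can attach via a SnapshotDescription carrying snapshot properties, whose exact constructor varies by release and is therefore not spelled out here.]

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flushes each region's memstore (the "Flushing ..." entries in the log) and then
          // writes the snapshot manifest before returning.
          admin.snapshot("snaptb-testExportWithResetTtl",
              TableName.valueOf("testExportWithResetTtl"));
        }
      }
    }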
2024-12-16T21:00:56,583 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 330ebab1e871e4cea2cd8940d534008f 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-16T21:00:56,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/.tmp/cf/ba2bcb1863cd4b2cbba0d503be122595 is 71, key is 15aa85f8bf5ba845d199144d36fcc28a/cf:q/1734382856370/Put/seqid=0 2024-12-16T21:00:56,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/.tmp/cf/045a0d7af7f240ae97d00fb046ef4514 is 71, key is 05260afda47569959b8830302166b914/cf:q/1734382856364/Put/seqid=0 2024-12-16T21:00:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741915_1091 (size=8258) 2024-12-16T21:00:56,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741915_1091 (size=8258) 2024-12-16T21:00:56,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741915_1091 (size=8258) 2024-12-16T21:00:56,628 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/.tmp/cf/ba2bcb1863cd4b2cbba0d503be122595 2024-12-16T21:00:56,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741916_1092 (size=5354) 2024-12-16T21:00:56,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741916_1092 (size=5354) 2024-12-16T21:00:56,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741916_1092 (size=5354) 2024-12-16T21:00:56,639 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/.tmp/cf/045a0d7af7f240ae97d00fb046ef4514 2024-12-16T21:00:56,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/.tmp/cf/045a0d7af7f240ae97d00fb046ef4514 as 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/cf/045a0d7af7f240ae97d00fb046ef4514 2024-12-16T21:00:56,658 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/cf/045a0d7af7f240ae97d00fb046ef4514, entries=4, sequenceid=5, filesize=5.2 K 2024-12-16T21:00:56,659 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 53858618241cf550a0c934f05cd0540f in 77ms, sequenceid=5, compaction requested=false 2024-12-16T21:00:56,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-16T21:00:56,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2538): Flush status journal for 53858618241cf550a0c934f05cd0540f: 2024-12-16T21:00:56,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. for snaptb-testExportWithResetTtl completed. 2024-12-16T21:00:56,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-16T21:00:56,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:00:56,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/cf/045a0d7af7f240ae97d00fb046ef4514] hfiles 2024-12-16T21:00:56,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/cf/045a0d7af7f240ae97d00fb046ef4514 for snapshot=snaptb-testExportWithResetTtl 2024-12-16T21:00:56,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/.tmp/cf/ba2bcb1863cd4b2cbba0d503be122595 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/cf/ba2bcb1863cd4b2cbba0d503be122595 2024-12-16T21:00:56,674 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/cf/ba2bcb1863cd4b2cbba0d503be122595, entries=46, sequenceid=5, filesize=8.1 K 2024-12-16T21:00:56,675 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 330ebab1e871e4cea2cd8940d534008f in 93ms, sequenceid=5, compaction requested=false 2024-12-16T21:00:56,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 330ebab1e871e4cea2cd8940d534008f: 2024-12-16T21:00:56,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. for snaptb-testExportWithResetTtl completed. 2024-12-16T21:00:56,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-16T21:00:56,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:00:56,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/cf/ba2bcb1863cd4b2cbba0d503be122595] hfiles 2024-12-16T21:00:56,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/cf/ba2bcb1863cd4b2cbba0d503be122595 for snapshot=snaptb-testExportWithResetTtl 2024-12-16T21:00:56,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741917_1093 (size=100) 2024-12-16T21:00:56,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741917_1093 (size=100) 2024-12-16T21:00:56,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741917_1093 (size=100) 2024-12-16T21:00:56,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 
2024-12-16T21:00:56,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=47 2024-12-16T21:00:56,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=47 2024-12-16T21:00:56,698 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 53858618241cf550a0c934f05cd0540f 2024-12-16T21:00:56,699 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 53858618241cf550a0c934f05cd0540f 2024-12-16T21:00:56,701 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; SnapshotRegionProcedure 53858618241cf550a0c934f05cd0540f in 272 msec 2024-12-16T21:00:56,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-16T21:00:56,731 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0001_000001 (auth:SIMPLE) from 127.0.0.1:60750 2024-12-16T21:00:56,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741918_1094 (size=100) 2024-12-16T21:00:56,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741918_1094 (size=100) 2024-12-16T21:00:56,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741918_1094 (size=100) 2024-12-16T21:00:56,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 
2024-12-16T21:00:56,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-16T21:00:56,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-16T21:00:56,740 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:00:56,740 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:00:56,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-12-16T21:00:56,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; SnapshotRegionProcedure 330ebab1e871e4cea2cd8940d534008f in 313 msec 2024-12-16T21:00:56,744 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:00:56,745 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:00:56,746 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:00:56,746 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-16T21:00:56,747 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0001/container_1734382828217_0001_01_000001/launch_container.sh] 2024-12-16T21:00:56,747 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0001/container_1734382828217_0001_01_000001/container_tokens] 2024-12-16T21:00:56,747 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-16T21:00:56,747 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0001/container_1734382828217_0001_01_000001/sysfs] 2024-12-16T21:00:56,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741919_1095 (size=600) 2024-12-16T21:00:56,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741919_1095 (size=600) 2024-12-16T21:00:56,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741919_1095 (size=600) 2024-12-16T21:00:56,789 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:00:56,797 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:00:56,797 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-16T21:00:56,800 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:00:56,800 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-16T21:00:56,802 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 389 msec 2024-12-16T21:00:57,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-16T21:00:57,018 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl, procId: 46 completed 2024-12-16T21:00:57,032 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export 
destination path: hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382857032 2024-12-16T21:00:57,032 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37263, tgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382857032, rawTgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382857032, srcFsUri=hdfs://localhost:37263, srcDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:57,068 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37263, inputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:00:57,068 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382857032, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382857032/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-16T21:00:57,071 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T21:00:57,076 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382857032/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-16T21:00:57,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741920_1096 (size=600) 2024-12-16T21:00:57,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741920_1096 (size=600) 2024-12-16T21:00:57,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741921_1097 (size=143) 2024-12-16T21:00:57,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741921_1097 (size=143) 2024-12-16T21:00:57,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741920_1096 (size=600) 2024-12-16T21:00:57,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741921_1097 (size=143) 2024-12-16T21:00:57,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741922_1098 (size=141) 2024-12-16T21:00:57,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741922_1098 (size=141) 2024-12-16T21:00:57,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741922_1098 (size=141) 2024-12-16T21:00:57,107 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:57,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:57,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:57,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:57,863 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T21:00:57,865 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41838, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T21:00:58,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-3094624403150434077.jar 2024-12-16T21:00:58,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:58,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:58,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-3208646358554978484.jar 2024-12-16T21:00:58,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:58,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:58,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:58,304 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:58,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:58,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T21:00:58,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T21:00:58,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T21:00:58,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T21:00:58,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T21:00:58,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T21:00:58,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T21:00:58,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T21:00:58,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T21:00:58,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 
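[Editor's illustrative sketch, not part of the captured log: the run of "For class X, using jar Y" DEBUG entries above and just below is TableMapReduceUtil resolving the jars that must ship with the export MapReduce job. A caller preparing its own job can trigger the same resolution explicitly; the job name here is a placeholder.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");  // placeholder job name
        // Adds the HBase, ZooKeeper, protobuf, metrics, etc. jars to the job's distributed
        // cache, which is what produces the "using jar ..." DEBUG lines in the log.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }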
2024-12-16T21:00:58,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T21:00:58,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T21:00:58,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T21:00:58,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:00:58,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:00:58,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:00:58,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:00:58,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:00:58,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:00:58,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:00:58,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741923_1099 (size=6350920) 2024-12-16T21:00:58,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741923_1099 (size=6350920) 2024-12-16T21:00:58,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39445 is added to blk_1073741923_1099 (size=6350920) 2024-12-16T21:00:58,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741924_1100 (size=127628) 2024-12-16T21:00:58,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741924_1100 (size=127628) 2024-12-16T21:00:58,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741924_1100 (size=127628) 2024-12-16T21:00:58,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741925_1101 (size=2172137) 2024-12-16T21:00:58,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741925_1101 (size=2172137) 2024-12-16T21:00:58,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741925_1101 (size=2172137) 2024-12-16T21:00:58,428 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:00:58,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741926_1102 (size=213228) 2024-12-16T21:00:58,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741926_1102 (size=213228) 2024-12-16T21:00:58,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741926_1102 (size=213228) 2024-12-16T21:00:58,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741927_1103 (size=1877034) 2024-12-16T21:00:58,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741927_1103 (size=1877034) 2024-12-16T21:00:58,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741927_1103 (size=1877034) 2024-12-16T21:00:58,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741928_1104 (size=533455) 2024-12-16T21:00:58,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741928_1104 (size=533455) 2024-12-16T21:00:58,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741928_1104 (size=533455) 2024-12-16T21:00:58,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741929_1105 (size=7280644) 2024-12-16T21:00:58,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741929_1105 (size=7280644) 2024-12-16T21:00:58,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741929_1105 (size=7280644) 2024-12-16T21:00:58,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741930_1106 (size=4188619) 2024-12-16T21:00:58,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741930_1106 (size=4188619) 2024-12-16T21:00:58,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741930_1106 (size=4188619) 2024-12-16T21:00:58,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741931_1107 (size=20406) 2024-12-16T21:00:58,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741931_1107 (size=20406) 2024-12-16T21:00:58,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741931_1107 (size=20406) 2024-12-16T21:00:58,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741932_1108 (size=75495) 2024-12-16T21:00:58,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741932_1108 (size=75495) 2024-12-16T21:00:58,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741932_1108 (size=75495) 2024-12-16T21:00:58,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741933_1109 (size=45609) 2024-12-16T21:00:58,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741933_1109 (size=45609) 2024-12-16T21:00:58,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741933_1109 (size=45609) 2024-12-16T21:00:58,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741934_1110 (size=110084) 2024-12-16T21:00:58,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741934_1110 (size=110084) 2024-12-16T21:00:58,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741934_1110 (size=110084) 2024-12-16T21:00:58,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741935_1111 (size=1323991) 2024-12-16T21:00:58,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741935_1111 (size=1323991) 2024-12-16T21:00:58,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741935_1111 (size=1323991) 2024-12-16T21:00:58,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741936_1112 (size=23076) 2024-12-16T21:00:58,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741936_1112 (size=23076) 2024-12-16T21:00:58,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741936_1112 (size=23076) 2024-12-16T21:00:58,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741937_1113 (size=126803) 2024-12-16T21:00:58,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741937_1113 (size=126803) 2024-12-16T21:00:58,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741937_1113 (size=126803) 2024-12-16T21:00:58,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741938_1114 (size=322274) 2024-12-16T21:00:58,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741938_1114 (size=322274) 2024-12-16T21:00:58,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741938_1114 (size=322274) 2024-12-16T21:00:58,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741939_1115 (size=1832290) 2024-12-16T21:00:58,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741939_1115 (size=1832290) 2024-12-16T21:00:58,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741939_1115 (size=1832290) 2024-12-16T21:00:58,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741940_1116 (size=30081) 2024-12-16T21:00:58,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741940_1116 (size=30081) 2024-12-16T21:00:58,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741940_1116 (size=30081) 2024-12-16T21:00:58,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741941_1117 (size=53616) 2024-12-16T21:00:58,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741941_1117 (size=53616) 2024-12-16T21:00:58,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741941_1117 (size=53616) 2024-12-16T21:00:59,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741942_1118 (size=29229) 2024-12-16T21:00:59,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741942_1118 (size=29229) 2024-12-16T21:00:59,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741942_1118 (size=29229) 2024-12-16T21:00:59,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741943_1119 (size=169089) 2024-12-16T21:00:59,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741943_1119 (size=169089) 2024-12-16T21:00:59,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741943_1119 (size=169089) 2024-12-16T21:00:59,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741944_1120 (size=5175431) 2024-12-16T21:00:59,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741944_1120 (size=5175431) 2024-12-16T21:00:59,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741944_1120 (size=5175431) 2024-12-16T21:00:59,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741945_1121 (size=136454) 2024-12-16T21:00:59,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741945_1121 (size=136454) 2024-12-16T21:00:59,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741945_1121 (size=136454) 2024-12-16T21:00:59,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741946_1122 (size=451756) 2024-12-16T21:00:59,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741946_1122 (size=451756) 2024-12-16T21:00:59,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741946_1122 (size=451756) 2024-12-16T21:00:59,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741947_1123 (size=907465) 2024-12-16T21:00:59,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741947_1123 (size=907465) 2024-12-16T21:00:59,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741947_1123 (size=907465) 2024-12-16T21:00:59,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741948_1124 (size=3317408) 2024-12-16T21:00:59,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741948_1124 (size=3317408) 2024-12-16T21:00:59,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741948_1124 (size=3317408) 2024-12-16T21:00:59,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741949_1125 (size=503880) 2024-12-16T21:00:59,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741949_1125 (size=503880) 2024-12-16T21:00:59,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741949_1125 (size=503880) 2024-12-16T21:00:59,606 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741950_1126 (size=4695811) 2024-12-16T21:00:59,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741950_1126 (size=4695811) 2024-12-16T21:00:59,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741950_1126 (size=4695811) 2024-12-16T21:00:59,610 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-16T21:00:59,616 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-16T21:00:59,620 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-16T21:00:59,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741951_1127 (size=324) 2024-12-16T21:00:59,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741951_1127 (size=324) 2024-12-16T21:00:59,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741951_1127 (size=324) 2024-12-16T21:00:59,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741952_1128 (size=15) 2024-12-16T21:00:59,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741952_1128 (size=15) 2024-12-16T21:00:59,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741952_1128 (size=15) 2024-12-16T21:01:00,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741953_1129 (size=304877) 2024-12-16T21:01:00,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741953_1129 (size=304877) 2024-12-16T21:01:00,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741953_1129 (size=304877) 2024-12-16T21:01:00,192 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:01:00,192 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T21:01:00,729 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0002_000001 (auth:SIMPLE) from 127.0.0.1:37364 2024-12-16T21:01:00,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-16T21:01:00,872 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-16T21:01:00,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-16T21:01:00,874 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-16T21:01:00,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-16T21:01:05,895 INFO [master/6b259cf15929:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-16T21:01:05,895 INFO [master/6b259cf15929:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-16T21:01:06,378 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:01:07,327 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0002_000001 (auth:SIMPLE) from 127.0.0.1:52246 2024-12-16T21:01:07,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741954_1130 (size=350551) 2024-12-16T21:01:07,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741954_1130 (size=350551) 2024-12-16T21:01:07,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741954_1130 (size=350551) 2024-12-16T21:01:09,623 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0002_000001 (auth:SIMPLE) from 127.0.0.1:37376 2024-12-16T21:01:13,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741955_1131 (size=8258) 2024-12-16T21:01:13,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741955_1131 (size=8258) 2024-12-16T21:01:13,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741955_1131 (size=8258) 2024-12-16T21:01:13,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741956_1132 (size=5354) 2024-12-16T21:01:13,363 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741956_1132 (size=5354) 2024-12-16T21:01:13,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741956_1132 (size=5354) 2024-12-16T21:01:13,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741957_1133 (size=17398) 2024-12-16T21:01:13,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741957_1133 (size=17398) 2024-12-16T21:01:13,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741957_1133 (size=17398) 2024-12-16T21:01:13,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741958_1134 (size=461) 2024-12-16T21:01:13,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741958_1134 (size=461) 2024-12-16T21:01:13,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741958_1134 (size=461) 2024-12-16T21:01:13,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741959_1135 (size=17398) 2024-12-16T21:01:13,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741959_1135 (size=17398) 2024-12-16T21:01:13,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741959_1135 (size=17398) 2024-12-16T21:01:13,499 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0002/container_1734382828217_0002_01_000002/launch_container.sh] 2024-12-16T21:01:13,500 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0002/container_1734382828217_0002_01_000002/container_tokens] 2024-12-16T21:01:13,500 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0002/container_1734382828217_0002_01_000002/sysfs] 2024-12-16T21:01:13,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to 
blk_1073741960_1136 (size=350551) 2024-12-16T21:01:13,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741960_1136 (size=350551) 2024-12-16T21:01:13,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741960_1136 (size=350551) 2024-12-16T21:01:13,524 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0002_000001 (auth:SIMPLE) from 127.0.0.1:40854 2024-12-16T21:01:15,355 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T21:01:15,357 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T21:01:15,364 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb-testExportWithResetTtl 2024-12-16T21:01:15,364 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T21:01:15,365 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T21:01:15,365 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-16T21:01:15,366 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-16T21:01:15,366 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-16T21:01:15,366 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382857032/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382857032/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-16T21:01:15,366 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382857032/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-16T21:01:15,367 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382857032/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-16T21:01:15,380 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testExportWithResetTtl 2024-12-16T21:01:15,381 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-16T21:01:15,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testExportWithResetTtl 2024-12-16T21:01:15,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-16T21:01:15,385 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382875384"}]},"ts":"1734382875384"} 2024-12-16T21:01:15,387 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-16T21:01:15,389 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-16T21:01:15,390 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-16T21:01:15,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=53858618241cf550a0c934f05cd0540f, UNASSIGN}, {pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=330ebab1e871e4cea2cd8940d534008f, UNASSIGN}] 2024-12-16T21:01:15,393 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=330ebab1e871e4cea2cd8940d534008f, UNASSIGN 2024-12-16T21:01:15,393 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=53858618241cf550a0c934f05cd0540f, UNASSIGN 2024-12-16T21:01:15,393 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=53858618241cf550a0c934f05cd0540f, regionState=CLOSING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:01:15,393 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=330ebab1e871e4cea2cd8940d534008f, regionState=CLOSING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:01:15,395 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:01:15,395 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=51, state=RUNNABLE; CloseRegionProcedure 53858618241cf550a0c934f05cd0540f, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:01:15,396 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:01:15,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=52, state=RUNNABLE; CloseRegionProcedure 330ebab1e871e4cea2cd8940d534008f, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:01:15,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-16T21:01:15,548 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin 
connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:01:15,548 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:01:15,549 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(124): Close 53858618241cf550a0c934f05cd0540f 2024-12-16T21:01:15,549 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:01:15,549 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(124): Close 330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:01:15,549 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:01:15,549 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1681): Closing 53858618241cf550a0c934f05cd0540f, disabling compactions & flushes 2024-12-16T21:01:15,549 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1681): Closing 330ebab1e871e4cea2cd8940d534008f, disabling compactions & flushes 2024-12-16T21:01:15,549 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 2024-12-16T21:01:15,549 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 2024-12-16T21:01:15,549 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 2024-12-16T21:01:15,549 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. after waiting 0 ms 2024-12-16T21:01:15,549 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 2024-12-16T21:01:15,549 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. after waiting 0 ms 2024-12-16T21:01:15,549 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 2024-12-16T21:01:15,549 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 
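The "Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list", "Finalize the Snapshot Export" and "Export Completed: snaptb-testExportWithResetTtl" entries earlier in this log are produced by the ExportSnapshot MapReduce tool. A minimal sketch of how such an export is typically driven from Java, using the documented -snapshot/-copy-to/-mappers options; the destination URI below is a placeholder and not a path from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Snapshot name is the one recorded in this log; the copy-to destination is illustrative.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://namenode.example:8020/hbase-backup",
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }
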
2024-12-16T21:01:15,559 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T21:01:15,559 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:01:15,560 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f. 2024-12-16T21:01:15,560 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1635): Region close journal for 53858618241cf550a0c934f05cd0540f: 2024-12-16T21:01:15,562 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(170): Closed 53858618241cf550a0c934f05cd0540f 2024-12-16T21:01:15,562 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=53858618241cf550a0c934f05cd0540f, regionState=CLOSED 2024-12-16T21:01:15,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=51 2024-12-16T21:01:15,572 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=51, state=SUCCESS; CloseRegionProcedure 53858618241cf550a0c934f05cd0540f, server=6b259cf15929,41713,1734382821274 in 169 msec 2024-12-16T21:01:15,572 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=53858618241cf550a0c934f05cd0540f, UNASSIGN in 179 msec 2024-12-16T21:01:15,574 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T21:01:15,575 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:01:15,575 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f. 
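The TestExportSnapshot(448)/(453) entries above walk the exported snapshot directory and report that .snapshotinfo and data.manifest are present. A rough equivalent of that listing with the plain HDFS FileSystem API, reusing the NameNode URI and exported snapshot path that appear in the log (both are specific to this test run):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListExportedSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode URI and snapshot directory as they appear in this log (test-run specific).
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37263"), conf);
        Path exported = new Path("/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/"
            + "export-test/export-1734382857032/.hbase-snapshot/snaptb-testExportWithResetTtl");
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(exported, true);
        while (it.hasNext()) {
          // Expect at least .snapshotinfo and data.manifest, as the verification step above reports.
          System.out.println(it.next().getPath());
        }
      }
    }
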
2024-12-16T21:01:15,575 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1635): Region close journal for 330ebab1e871e4cea2cd8940d534008f: 2024-12-16T21:01:15,577 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=330ebab1e871e4cea2cd8940d534008f, regionState=CLOSED 2024-12-16T21:01:15,577 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(170): Closed 330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:01:15,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=52 2024-12-16T21:01:15,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=52, state=SUCCESS; CloseRegionProcedure 330ebab1e871e4cea2cd8940d534008f, server=6b259cf15929,42323,1734382821139 in 183 msec 2024-12-16T21:01:15,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=50 2024-12-16T21:01:15,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=330ebab1e871e4cea2cd8940d534008f, UNASSIGN in 190 msec 2024-12-16T21:01:15,585 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-16T21:01:15,585 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; CloseTableRegionsProcedure table=testExportWithResetTtl in 193 msec 2024-12-16T21:01:15,587 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382875587"}]},"ts":"1734382875587"} 2024-12-16T21:01:15,588 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-16T21:01:15,591 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-16T21:01:15,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; DisableTableProcedure table=testExportWithResetTtl in 210 msec 2024-12-16T21:01:15,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-16T21:01:15,687 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testExportWithResetTtl, procId: 49 completed 2024-12-16T21:01:15,687 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-16T21:01:15,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T21:01:15,689 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T21:01:15,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] access.PermissionStorage(259): Removing permissions of removed table testExportWithResetTtl 2024-12-16T21:01:15,690 DEBUG [PEWorker-4 {}] 
procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=55, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T21:01:15,692 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-16T21:01:15,694 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f 2024-12-16T21:01:15,694 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:01:15,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T21:01:15,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T21:01:15,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T21:01:15,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T21:01:15,697 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-16T21:01:15,697 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-16T21:01:15,697 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-16T21:01:15,698 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/recovered.edits] 2024-12-16T21:01:15,698 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-16T21:01:15,701 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/recovered.edits] 2024-12-16T21:01:15,703 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T21:01:15,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T21:01:15,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:15,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T21:01:15,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:15,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:15,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T21:01:15,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:15,705 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:15,705 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:15,706 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:15,707 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:15,707 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/cf/045a0d7af7f240ae97d00fb046ef4514 
to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/cf/045a0d7af7f240ae97d00fb046ef4514 2024-12-16T21:01:15,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-16T21:01:15,708 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/cf/ba2bcb1863cd4b2cbba0d503be122595 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/cf/ba2bcb1863cd4b2cbba0d503be122595 2024-12-16T21:01:15,712 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/recovered.edits/8.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f/recovered.edits/8.seqid 2024-12-16T21:01:15,712 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/53858618241cf550a0c934f05cd0540f 2024-12-16T21:01:15,713 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/recovered.edits/8.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f/recovered.edits/8.seqid 2024-12-16T21:01:15,714 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportWithResetTtl/330ebab1e871e4cea2cd8940d534008f 2024-12-16T21:01:15,714 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-16T21:01:15,716 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=55, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T21:01:15,724 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-16T21:01:15,729 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testExportWithResetTtl' descriptor. 2024-12-16T21:01:15,731 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=55, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T21:01:15,731 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testExportWithResetTtl' from region states. 
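The surrounding entries record the master-side DisableTableProcedure (pid=49) and DeleteTableProcedure (pid=55) for testExportWithResetTtl, which the test client triggers through the Admin API. A minimal sketch of the client-side calls that start those procedures, assuming a reachable cluster configuration on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testExportWithResetTtl");
          admin.disableTable(tn);   // blocks until the DisableTableProcedure finishes
          admin.deleteTable(tn);    // archives the region dirs and removes the table from hbase:meta
        }
      }
    }
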
2024-12-16T21:01:15,732 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382875731"}]},"ts":"9223372036854775807"} 2024-12-16T21:01:15,732 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382875731"}]},"ts":"9223372036854775807"} 2024-12-16T21:01:15,737 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T21:01:15,737 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 53858618241cf550a0c934f05cd0540f, NAME => 'testExportWithResetTtl,,1734382855723.53858618241cf550a0c934f05cd0540f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 330ebab1e871e4cea2cd8940d534008f, NAME => 'testExportWithResetTtl,1,1734382855723.330ebab1e871e4cea2cd8940d534008f.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T21:01:15,737 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testExportWithResetTtl' as deleted. 2024-12-16T21:01:15,737 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734382875737"}]},"ts":"9223372036854775807"} 2024-12-16T21:01:15,740 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testExportWithResetTtl state from META 2024-12-16T21:01:15,742 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=55, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T21:01:15,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DeleteTableProcedure table=testExportWithResetTtl in 55 msec 2024-12-16T21:01:15,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-16T21:01:15,809 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testExportWithResetTtl, procId: 55 completed 2024-12-16T21:01:15,810 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithResetTtl 2024-12-16T21:01:15,810 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-16T21:01:15,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T21:01:15,828 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382875828"}]},"ts":"1734382875828"} 2024-12-16T21:01:15,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-16T21:01:15,831 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-16T21:01:15,833 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithResetTtl to state=DISABLING 2024-12-16T21:01:15,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-16T21:01:15,836 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4692eddc5cc4a103c5b809ba757d08ab, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=700f181be5e8b4d3e492309ab68f9d3d, UNASSIGN}] 2024-12-16T21:01:15,838 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=700f181be5e8b4d3e492309ab68f9d3d, UNASSIGN 2024-12-16T21:01:15,838 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4692eddc5cc4a103c5b809ba757d08ab, UNASSIGN 2024-12-16T21:01:15,839 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=700f181be5e8b4d3e492309ab68f9d3d, regionState=CLOSING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:01:15,839 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=4692eddc5cc4a103c5b809ba757d08ab, regionState=CLOSING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:01:15,842 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:01:15,842 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE; CloseRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:01:15,846 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:01:15,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE; CloseRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:01:15,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-16T21:01:16,003 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:01:16,004 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(124): Close 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:01:16,004 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:01:16,004 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:01:16,004 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1681): 
Closing 4692eddc5cc4a103c5b809ba757d08ab, disabling compactions & flushes 2024-12-16T21:01:16,004 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 2024-12-16T21:01:16,004 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 2024-12-16T21:01:16,004 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. after waiting 0 ms 2024-12-16T21:01:16,004 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 2024-12-16T21:01:16,005 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(124): Close 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:01:16,005 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:01:16,005 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1681): Closing 700f181be5e8b4d3e492309ab68f9d3d, disabling compactions & flushes 2024-12-16T21:01:16,005 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 2024-12-16T21:01:16,005 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 2024-12-16T21:01:16,005 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. after waiting 0 ms 2024-12-16T21:01:16,005 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 
2024-12-16T21:01:16,011 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:01:16,011 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:01:16,012 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:01:16,012 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d. 2024-12-16T21:01:16,012 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1635): Region close journal for 700f181be5e8b4d3e492309ab68f9d3d: 2024-12-16T21:01:16,012 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:01:16,012 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab. 
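The repeated "Checking to see if procedure is done pid=56" entries are the client polling the master while it waits for the disable of testtb-testExportWithResetTtl to complete; "Operation: DISABLE ... procId: 56 completed" is logged once that wait resolves. A small sketch of the same pattern using the asynchronous Admin call (same Connection setup as the sketch above, which is an assumption, not part of this log):

    import java.util.concurrent.Future;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncDisableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          Future<Void> pending = admin.disableTableAsync(TableName.valueOf("testtb-testExportWithResetTtl"));
          // While this future is pending, the client periodically asks the master whether the stored
          // procedure has finished, which is what the MasterRpcServices(1305) entries in this log show.
          pending.get();
        }
      }
    }
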
2024-12-16T21:01:16,012 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1635): Region close journal for 4692eddc5cc4a103c5b809ba757d08ab: 2024-12-16T21:01:16,014 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(170): Closed 700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:01:16,014 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=700f181be5e8b4d3e492309ab68f9d3d, regionState=CLOSED 2024-12-16T21:01:16,015 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(170): Closed 4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:01:16,015 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=4692eddc5cc4a103c5b809ba757d08ab, regionState=CLOSED 2024-12-16T21:01:16,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=59 2024-12-16T21:01:16,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=59, state=SUCCESS; CloseRegionProcedure 700f181be5e8b4d3e492309ab68f9d3d, server=6b259cf15929,36861,1734382821216 in 173 msec 2024-12-16T21:01:16,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=58 2024-12-16T21:01:16,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=58, state=SUCCESS; CloseRegionProcedure 4692eddc5cc4a103c5b809ba757d08ab, server=6b259cf15929,41713,1734382821274 in 178 msec 2024-12-16T21:01:16,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=700f181be5e8b4d3e492309ab68f9d3d, UNASSIGN in 185 msec 2024-12-16T21:01:16,025 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-16T21:01:16,025 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4692eddc5cc4a103c5b809ba757d08ab, UNASSIGN in 187 msec 2024-12-16T21:01:16,027 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-16T21:01:16,027 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 192 msec 2024-12-16T21:01:16,029 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382876028"}]},"ts":"1734382876028"} 2024-12-16T21:01:16,030 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-16T21:01:16,032 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-16T21:01:16,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithResetTtl in 223 msec 2024-12-16T21:01:16,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-16T21:01:16,132 INFO [Time-limited test 
{}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl, procId: 56 completed 2024-12-16T21:01:16,133 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-16T21:01:16,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T21:01:16,135 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T21:01:16,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-16T21:01:16,137 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T21:01:16,138 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-16T21:01:16,140 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:01:16,140 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:01:16,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T21:01:16,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T21:01:16,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T21:01:16,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T21:01:16,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-16T21:01:16,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-16T21:01:16,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl 
with data PBUF 2024-12-16T21:01:16,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-16T21:01:16,144 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/recovered.edits] 2024-12-16T21:01:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T21:01:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T21:01:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T21:01:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T21:01:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:16,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:16,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-16T21:01:16,146 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/recovered.edits] 2024-12-16T21:01:16,151 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/cf/2904d7234d5b417690a49480e089382d to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/cf/2904d7234d5b417690a49480e089382d 2024-12-16T21:01:16,154 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/cf/f5b8db49da254126939406a31410928b to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/cf/f5b8db49da254126939406a31410928b 2024-12-16T21:01:16,157 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab/recovered.edits/9.seqid 2024-12-16T21:01:16,159 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/4692eddc5cc4a103c5b809ba757d08ab 2024-12-16T21:01:16,159 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d/recovered.edits/9.seqid 2024-12-16T21:01:16,160 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithResetTtl/700f181be5e8b4d3e492309ab68f9d3d 2024-12-16T21:01:16,160 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-16T21:01:16,164 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T21:01:16,169 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-16T21:01:16,171 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-16T21:01:16,173 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T21:01:16,173 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithResetTtl' from region states. 
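
(Editor's note: the DISABLE/DELETE table procedures above, and the "Deleting snapshot" entries that follow just below, correspond to ordinary HBase Admin client calls. A minimal sketch, assuming an HBase 2.x client and the table/snapshot names from this run; this is illustrative only, not the test's actual code:)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropExportTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
          // Triggers the DisableTableProcedure / DeleteTableProcedure seen above (pid=56, pid=62).
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn);
          }
          admin.deleteTable(tn);
          // Triggers the SnapshotManager "Deleting snapshot" entries that follow.
          admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
          admin.deleteSnapshot("snaptb-testExportWithResetTtl");
          admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
        }
      }
    }

(The master turns the deleteTable call into the state machine logged here: PRE_OPERATION, CLEAR_FS_LAYOUT with HFileArchiver moving region files under archive/, REMOVE_FROM_META, then POST_OPERATION.)
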
2024-12-16T21:01:16,173 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382876173"}]},"ts":"9223372036854775807"} 2024-12-16T21:01:16,173 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382876173"}]},"ts":"9223372036854775807"} 2024-12-16T21:01:16,175 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T21:01:16,175 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4692eddc5cc4a103c5b809ba757d08ab, NAME => 'testtb-testExportWithResetTtl,,1734382853168.4692eddc5cc4a103c5b809ba757d08ab.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 700f181be5e8b4d3e492309ab68f9d3d, NAME => 'testtb-testExportWithResetTtl,1,1734382853168.700f181be5e8b4d3e492309ab68f9d3d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T21:01:16,175 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-16T21:01:16,176 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734382876175"}]},"ts":"9223372036854775807"} 2024-12-16T21:01:16,179 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithResetTtl state from META 2024-12-16T21:01:16,182 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T21:01:16,184 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithResetTtl in 49 msec 2024-12-16T21:01:16,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-16T21:01:16,247 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl, procId: 62 completed 2024-12-16T21:01:16,257 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" 2024-12-16T21:01:16,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-16T21:01:16,261 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" 2024-12-16T21:01:16,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-16T21:01:16,264 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" 2024-12-16T21:01:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-16T21:01:16,292 INFO 
[Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=794 (was 774) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:41496 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2017 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 25346) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1451703684_1 at /127.0.0.1:56844 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:56862 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1451703684_1 at /127.0.0.1:38344 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:33165 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33165 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:38382 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=813 (was 805) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=374 (was 216) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=4343 (was 4735) 2024-12-16T21:01:16,292 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=794 is superior to 500 2024-12-16T21:01:16,310 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=794, OpenFileDescriptor=813, MaxFileDescriptor=1048576, SystemLoadAverage=374, ProcessCount=17, AvailableMemoryMB=4342 2024-12-16T21:01:16,310 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=794 is superior to 500 2024-12-16T21:01:16,311 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:01:16,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-16T21:01:16,313 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:01:16,314 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:01:16,314 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 63 2024-12-16T21:01:16,315 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState 
execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:01:16,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T21:01:16,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741961_1137 (size=407) 2024-12-16T21:01:16,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741961_1137 (size=407) 2024-12-16T21:01:16,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741961_1137 (size=407) 2024-12-16T21:01:16,326 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1e0be67c5a2679df18447ddfabb4a8b1, NAME => 'testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:01:16,327 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => d5d1cb8817001d1d23d072e13c7cd118, NAME => 'testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:01:16,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741962_1138 (size=68) 2024-12-16T21:01:16,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741962_1138 (size=68) 2024-12-16T21:01:16,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741963_1139 (size=68) 2024-12-16T21:01:16,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741962_1138 (size=68) 2024-12-16T21:01:16,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741963_1139 (size=68) 2024-12-16T21:01:16,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741963_1139 (size=68) 2024-12-16T21:01:16,342 DEBUG 
[RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:01:16,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:01:16,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing d5d1cb8817001d1d23d072e13c7cd118, disabling compactions & flushes 2024-12-16T21:01:16,342 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 1e0be67c5a2679df18447ddfabb4a8b1, disabling compactions & flushes 2024-12-16T21:01:16,343 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 2024-12-16T21:01:16,343 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:16,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 2024-12-16T21:01:16,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. after waiting 0 ms 2024-12-16T21:01:16,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:16,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 2024-12-16T21:01:16,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. after waiting 0 ms 2024-12-16T21:01:16,343 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 2024-12-16T21:01:16,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:16,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for d5d1cb8817001d1d23d072e13c7cd118: 2024-12-16T21:01:16,343 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 
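
(Editor's note: the create 'testtb-testExportFileSystemState' request above, with a single 'cf' family and two regions split at '1', maps to a TableDescriptorBuilder/createTable call. A minimal sketch under the same assumptions as the previous snippet; the test's real setup code is not shown in this log:)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)   // matches VERSIONS => '1' in the descriptor logged above
                  .build())
              .build();
          // A single split key '1' yields the two regions created above:
          // STARTKEY '' .. '1' and STARTKEY '1' .. ''.
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(td, splitKeys);
        }
      }
    }

(The master then runs the logged CreateTableProcedure: WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE, POST_OPERATION.)
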
2024-12-16T21:01:16,343 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1e0be67c5a2679df18447ddfabb4a8b1: 2024-12-16T21:01:16,344 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:01:16,344 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734382876344"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382876344"}]},"ts":"1734382876344"} 2024-12-16T21:01:16,344 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734382876344"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382876344"}]},"ts":"1734382876344"} 2024-12-16T21:01:16,346 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T21:01:16,347 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:01:16,347 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382876347"}]},"ts":"1734382876347"} 2024-12-16T21:01:16,349 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-16T21:01:16,353 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:01:16,354 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:01:16,354 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:01:16,354 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:01:16,354 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:01:16,354 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:01:16,354 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:01:16,354 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:01:16,355 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1e0be67c5a2679df18447ddfabb4a8b1, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d5d1cb8817001d1d23d072e13c7cd118, ASSIGN}] 2024-12-16T21:01:16,356 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportFileSystemState, region=d5d1cb8817001d1d23d072e13c7cd118, ASSIGN 2024-12-16T21:01:16,356 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1e0be67c5a2679df18447ddfabb4a8b1, ASSIGN 2024-12-16T21:01:16,356 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d5d1cb8817001d1d23d072e13c7cd118, ASSIGN; state=OFFLINE, location=6b259cf15929,42323,1734382821139; forceNewPlan=false, retain=false 2024-12-16T21:01:16,357 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1e0be67c5a2679df18447ddfabb4a8b1, ASSIGN; state=OFFLINE, location=6b259cf15929,41713,1734382821274; forceNewPlan=false, retain=false 2024-12-16T21:01:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T21:01:16,507 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T21:01:16,508 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=1e0be67c5a2679df18447ddfabb4a8b1, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:01:16,508 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=d5d1cb8817001d1d23d072e13c7cd118, regionState=OPENING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:01:16,510 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE; OpenRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:01:16,511 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE; OpenRegionProcedure d5d1cb8817001d1d23d072e13c7cd118, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:01:16,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T21:01:16,662 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:01:16,663 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:01:16,666 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:16,666 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 
2024-12-16T21:01:16,666 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 1e0be67c5a2679df18447ddfabb4a8b1, NAME => 'testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T21:01:16,666 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7285): Opening region: {ENCODED => d5d1cb8817001d1d23d072e13c7cd118, NAME => 'testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T21:01:16,667 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. service=AccessControlService 2024-12-16T21:01:16,667 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. service=AccessControlService 2024-12-16T21:01:16,667 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:01:16,667 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:01:16,667 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:16,667 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:16,667 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:01:16,667 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:01:16,667 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:16,667 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7327): checking encryption for d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:16,667 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] 
regionserver.HRegion(7330): checking classloading for 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:16,667 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7330): checking classloading for d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:16,669 INFO [StoreOpener-1e0be67c5a2679df18447ddfabb4a8b1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:16,669 INFO [StoreOpener-d5d1cb8817001d1d23d072e13c7cd118-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:16,671 INFO [StoreOpener-d5d1cb8817001d1d23d072e13c7cd118-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d5d1cb8817001d1d23d072e13c7cd118 columnFamilyName cf 2024-12-16T21:01:16,671 INFO [StoreOpener-1e0be67c5a2679df18447ddfabb4a8b1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e0be67c5a2679df18447ddfabb4a8b1 columnFamilyName cf 2024-12-16T21:01:16,671 DEBUG [StoreOpener-1e0be67c5a2679df18447ddfabb4a8b1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:01:16,671 DEBUG [StoreOpener-d5d1cb8817001d1d23d072e13c7cd118-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:01:16,671 INFO [StoreOpener-1e0be67c5a2679df18447ddfabb4a8b1-1 {}] regionserver.HStore(327): Store=1e0be67c5a2679df18447ddfabb4a8b1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:01:16,671 INFO [StoreOpener-d5d1cb8817001d1d23d072e13c7cd118-1 {}] regionserver.HStore(327): Store=d5d1cb8817001d1d23d072e13c7cd118/cf, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:01:16,672 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:16,672 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:16,673 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:16,673 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:16,677 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:16,677 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1085): writing seq id for d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:16,679 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:01:16,679 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:01:16,680 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 1e0be67c5a2679df18447ddfabb4a8b1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73773883, jitterRate=0.09931652247905731}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:01:16,680 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1102): Opened d5d1cb8817001d1d23d072e13c7cd118; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68944571, jitterRate=0.0273541659116745}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:01:16,680 DEBUG 
[RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 1e0be67c5a2679df18447ddfabb4a8b1: 2024-12-16T21:01:16,680 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1001): Region open journal for d5d1cb8817001d1d23d072e13c7cd118: 2024-12-16T21:01:16,681 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1., pid=66, masterSystemTime=1734382876662 2024-12-16T21:01:16,681 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118., pid=67, masterSystemTime=1734382876663 2024-12-16T21:01:16,683 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:16,683 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:16,683 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=1e0be67c5a2679df18447ddfabb4a8b1, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:01:16,683 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 2024-12-16T21:01:16,683 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 2024-12-16T21:01:16,684 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=d5d1cb8817001d1d23d072e13c7cd118, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:01:16,685 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=6b259cf15929,41713,1734382821274, table=testtb-testExportFileSystemState, region=1e0be67c5a2679df18447ddfabb4a8b1. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
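
The entries above are the tail end of creating testtb-testExportFileSystemState: each of the two regions builds its cache and compaction configuration for column family cf, finds no recovered edits, writes an initial seqid file, and is then reported OPEN in hbase:meta. For reference, a minimal client-side sketch of creating such a two-region table is below. This is not the test harness code; apart from the table name, the family cf and the split key "1" (inferred from the region start keys above), everything here is an assumption.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemState");
      // One split key ("1") yields the two regions seen above: [,1) and [1,).
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(
          TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build(),
          splitKeys);
    }
  }
}
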
2024-12-16T21:01:16,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=64 2024-12-16T21:01:16,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=64, state=SUCCESS; OpenRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1, server=6b259cf15929,41713,1734382821274 in 175 msec 2024-12-16T21:01:16,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=65 2024-12-16T21:01:16,690 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1e0be67c5a2679df18447ddfabb4a8b1, ASSIGN in 333 msec 2024-12-16T21:01:16,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=65, state=SUCCESS; OpenRegionProcedure d5d1cb8817001d1d23d072e13c7cd118, server=6b259cf15929,42323,1734382821139 in 177 msec 2024-12-16T21:01:16,691 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=63 2024-12-16T21:01:16,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d5d1cb8817001d1d23d072e13c7cd118, ASSIGN in 335 msec 2024-12-16T21:01:16,692 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:01:16,692 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382876692"}]},"ts":"1734382876692"} 2024-12-16T21:01:16,694 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-16T21:01:16,696 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:01:16,696 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-16T21:01:16,698 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T21:01:16,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:16,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:16,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:16,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, 
quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:16,702 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:16,703 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:16,703 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:16,703 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:16,704 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemState in 391 msec 2024-12-16T21:01:16,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T21:01:16,919 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState, procId: 63 completed 2024-12-16T21:01:16,919 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-16T21:01:16,919 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:01:16,923 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-12-16T21:01:16,923 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:01:16,923 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemState assigned. 2024-12-16T21:01:16,927 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T21:01:16,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382876927 (current time:1734382876927). 
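
The master has just accepted the first snapshot request ({ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }). On the client side this corresponds to a single Admin.snapshot call; a minimal sketch follows, with the same assumed connection setup as the earlier sketch (FLUSH is the default snapshot type for this overload).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure (pid=68 below) completes.
      admin.snapshot("emptySnaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"));
    }
  }
}
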
2024-12-16T21:01:16,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:01:16,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-16T21:01:16,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:01:16,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f335ee5 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a77e0a2 2024-12-16T21:01:16,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a9d9c66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:01:16,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:16,935 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:16,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f335ee5 to 127.0.0.1:61742 2024-12-16T21:01:16,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:01:16,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0353441d to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d093fb4 2024-12-16T21:01:16,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b01e0df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:01:16,941 DEBUG [hconnection-0x144f97ed-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:16,942 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45572, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:16,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:16,945 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57978, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:16,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x0353441d to 127.0.0.1:61742 2024-12-16T21:01:16,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:01:16,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T21:01:16,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:01:16,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T21:01:16,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-16T21:01:16,949 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:01:16,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-16T21:01:16,950 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:01:16,953 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:01:16,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741964_1140 (size=170) 2024-12-16T21:01:16,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741964_1140 (size=170) 2024-12-16T21:01:16,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741964_1140 (size=170) 2024-12-16T21:01:16,966 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:01:16,966 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1}, {pid=70, ppid=68, state=RUNNABLE; 
SnapshotRegionProcedure d5d1cb8817001d1d23d072e13c7cd118}] 2024-12-16T21:01:16,967 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:16,967 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:17,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-16T21:01:17,118 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:01:17,118 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:01:17,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-16T21:01:17,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42323 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-16T21:01:17,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:17,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 2024-12-16T21:01:17,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2538): Flush status journal for 1e0be67c5a2679df18447ddfabb4a8b1: 2024-12-16T21:01:17,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for d5d1cb8817001d1d23d072e13c7cd118: 2024-12-16T21:01:17,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. for emptySnaptb0-testExportFileSystemState completed. 2024-12-16T21:01:17,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. for emptySnaptb0-testExportFileSystemState completed. 2024-12-16T21:01:17,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-16T21:01:17,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:01:17,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-16T21:01:17,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:01:17,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:01:17,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:01:17,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741965_1141 (size=71) 2024-12-16T21:01:17,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741966_1142 (size=71) 2024-12-16T21:01:17,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:17,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-16T21:01:17,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=69 2024-12-16T21:01:17,135 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:17,135 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:17,138 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; SnapshotRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1 in 170 msec 2024-12-16T21:01:17,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741965_1141 (size=71) 2024-12-16T21:01:17,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741965_1141 (size=71) 2024-12-16T21:01:17,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 
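
Both SnapshotRegionProcedures for the empty snapshot have now stored region-info and found no hfiles to reference. Once the parent SnapshotProcedure consolidates the manifest and moves it out of .hbase-snapshot/.tmp (a few entries below), the snapshot is visible to clients. A hedged sketch of listing completed snapshots, under the same assumed setup:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // emptySnaptb0-testExportFileSystemState should appear here once pid=68 finishes.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName());
      }
    }
  }
}
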
2024-12-16T21:01:17,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-16T21:01:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741966_1142 (size=71) 2024-12-16T21:01:17,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-16T21:01:17,139 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741966_1142 (size=71) 2024-12-16T21:01:17,139 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:17,142 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:01:17,142 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=68 2024-12-16T21:01:17,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=68, state=SUCCESS; SnapshotRegionProcedure d5d1cb8817001d1d23d072e13c7cd118 in 174 msec 2024-12-16T21:01:17,143 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:01:17,143 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:01:17,143 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-16T21:01:17,144 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-16T21:01:17,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741967_1143 (size=552) 2024-12-16T21:01:17,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741967_1143 (size=552) 2024-12-16T21:01:17,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741967_1143 (size=552) 2024-12-16T21:01:17,156 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:01:17,161 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:01:17,161 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-16T21:01:17,162 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:01:17,163 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-16T21:01:17,164 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 216 msec 2024-12-16T21:01:17,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-16T21:01:17,253 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 68 completed 2024-12-16T21:01:17,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41713 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:01:17,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42323 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:01:17,268 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemState 2024-12-16T21:01:17,268 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 
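
The two HRegion(8254) warnings above come from the test loading rows into both regions with durability switched off. A hedged sketch of such a put (the row key and value below are illustrative, not the test's actual data; the family cf and qualifier q match the cells that show up in the flush later):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("example-row"));  // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("example-value"));
      // Skipping the WAL is what triggers the "Data may be lost" warning above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
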
2024-12-16T21:01:17,268 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:01:17,283 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T21:01:17,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382877283 (current time:1734382877283). 2024-12-16T21:01:17,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:01:17,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-16T21:01:17,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:01:17,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3de63aa3 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@557e320d 2024-12-16T21:01:17,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@722aa995, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:01:17,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:17,296 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45578, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:17,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3de63aa3 to 127.0.0.1:61742 2024-12-16T21:01:17,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:01:17,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e62afb0 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a28042b 2024-12-16T21:01:17,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d6968fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:01:17,304 DEBUG [hconnection-0x6228495d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:17,306 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45594, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:17,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:17,308 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:17,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e62afb0 to 127.0.0.1:61742 2024-12-16T21:01:17,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:01:17,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T21:01:17,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:01:17,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T21:01:17,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-16T21:01:17,313 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:01:17,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T21:01:17,314 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:01:17,317 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:01:17,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741968_1144 (size=165) 2024-12-16T21:01:17,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741968_1144 (size=165) 2024-12-16T21:01:17,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741968_1144 (size=165) 2024-12-16T21:01:17,335 INFO [PEWorker-2 
{}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:01:17,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1}, {pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure d5d1cb8817001d1d23d072e13c7cd118}] 2024-12-16T21:01:17,337 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:17,337 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:17,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T21:01:17,488 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:01:17,488 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:01:17,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-16T21:01:17,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42323 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-16T21:01:17,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:17,489 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 1e0be67c5a2679df18447ddfabb4a8b1 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-16T21:01:17,491 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 
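
Unlike the empty snapshot, snaptb0-testExportFileSystemState is taken with data sitting in both memstores, so each SnapshotRegionCallable above flushes its region before collecting file references. Per region this is broadly the same work an explicit flush would trigger; a minimal sketch, same assumed setup:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Writes memstore contents out as hfiles under .../data/default/<table>/<region>/cf/,
      // the same files the snapshot manifest references below.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemState"));
    }
  }
}
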
2024-12-16T21:01:17,491 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2837): Flushing d5d1cb8817001d1d23d072e13c7cd118 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-16T21:01:17,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/.tmp/cf/658c462438a44292a7985542bcaef2bb is 71, key is 0bc81549c766a4dd74bb92276247bd2c/cf:q/1734382877263/Put/seqid=0 2024-12-16T21:01:17,523 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/.tmp/cf/9113c1a2b60649db9abc0e0bfc801972 is 71, key is 14d3e30b85a1cb237e9b3553fe240e99/cf:q/1734382877263/Put/seqid=0 2024-12-16T21:01:17,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741969_1145 (size=5286) 2024-12-16T21:01:17,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741969_1145 (size=5286) 2024-12-16T21:01:17,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741969_1145 (size=5286) 2024-12-16T21:01:17,530 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/.tmp/cf/658c462438a44292a7985542bcaef2bb 2024-12-16T21:01:17,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741970_1146 (size=8324) 2024-12-16T21:01:17,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741970_1146 (size=8324) 2024-12-16T21:01:17,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741970_1146 (size=8324) 2024-12-16T21:01:17,538 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/.tmp/cf/9113c1a2b60649db9abc0e0bfc801972 2024-12-16T21:01:17,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/.tmp/cf/658c462438a44292a7985542bcaef2bb as 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/cf/658c462438a44292a7985542bcaef2bb 2024-12-16T21:01:17,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/.tmp/cf/9113c1a2b60649db9abc0e0bfc801972 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/cf/9113c1a2b60649db9abc0e0bfc801972 2024-12-16T21:01:17,551 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/cf/9113c1a2b60649db9abc0e0bfc801972, entries=47, sequenceid=6, filesize=8.1 K 2024-12-16T21:01:17,553 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for d5d1cb8817001d1d23d072e13c7cd118 in 62ms, sequenceid=6, compaction requested=false 2024-12-16T21:01:17,553 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-16T21:01:17,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2538): Flush status journal for d5d1cb8817001d1d23d072e13c7cd118: 2024-12-16T21:01:17,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. for snaptb0-testExportFileSystemState completed. 2024-12-16T21:01:17,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-16T21:01:17,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:01:17,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/cf/9113c1a2b60649db9abc0e0bfc801972] hfiles 2024-12-16T21:01:17,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/cf/9113c1a2b60649db9abc0e0bfc801972 for snapshot=snaptb0-testExportFileSystemState 2024-12-16T21:01:17,564 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/cf/658c462438a44292a7985542bcaef2bb, entries=3, sequenceid=6, filesize=5.2 K 2024-12-16T21:01:17,565 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 1e0be67c5a2679df18447ddfabb4a8b1 in 76ms, sequenceid=6, compaction requested=false 2024-12-16T21:01:17,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 1e0be67c5a2679df18447ddfabb4a8b1: 2024-12-16T21:01:17,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. for snaptb0-testExportFileSystemState completed. 2024-12-16T21:01:17,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-16T21:01:17,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:01:17,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/cf/658c462438a44292a7985542bcaef2bb] hfiles 2024-12-16T21:01:17,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/cf/658c462438a44292a7985542bcaef2bb for snapshot=snaptb0-testExportFileSystemState 2024-12-16T21:01:17,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741971_1147 (size=110) 2024-12-16T21:01:17,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741971_1147 (size=110) 2024-12-16T21:01:17,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741971_1147 (size=110) 2024-12-16T21:01:17,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 
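
With both flushed hfiles referenced in its manifest, the snapshot becomes fully self-describing once it is consolidated and moved out of .tmp (below). Such a snapshot can later be cloned into a new table without copying data up front; a hedged sketch, where the target table name is hypothetical:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CloneFromSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The clone's initial store files are links to the snapshotted hfiles.
      admin.cloneSnapshot("snaptb0-testExportFileSystemState",
          TableName.valueOf("clone-testExportFileSystemState"));  // hypothetical table name
    }
  }
}
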
2024-12-16T21:01:17,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-16T21:01:17,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=73 2024-12-16T21:01:17,576 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:17,576 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:17,578 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=71, state=SUCCESS; SnapshotRegionProcedure d5d1cb8817001d1d23d072e13c7cd118 in 242 msec 2024-12-16T21:01:17,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741972_1148 (size=110) 2024-12-16T21:01:17,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741972_1148 (size=110) 2024-12-16T21:01:17,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741972_1148 (size=110) 2024-12-16T21:01:17,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:17,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-16T21:01:17,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-16T21:01:17,593 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:17,593 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:17,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-16T21:01:17,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; SnapshotRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1 in 259 msec 2024-12-16T21:01:17,595 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:01:17,596 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH 
ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:01:17,596 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:01:17,596 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-16T21:01:17,597 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-16T21:01:17,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741973_1149 (size=630) 2024-12-16T21:01:17,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741973_1149 (size=630) 2024-12-16T21:01:17,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741973_1149 (size=630) 2024-12-16T21:01:17,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T21:01:17,619 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:01:17,631 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:01:17,632 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-16T21:01:17,633 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:01:17,633 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-16T21:01:17,634 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 322 msec 2024-12-16T21:01:17,917 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T21:01:17,917 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 71 completed 2024-12-16T21:01:17,917 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382877917 2024-12-16T21:01:17,918 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37263, tgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382877917, rawTgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382877917, srcFsUri=hdfs://localhost:37263, srcDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:01:17,955 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37263, inputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:01:17,955 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382877917, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382877917/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-16T21:01:17,958 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
2024-12-16T21:01:17,963 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382877917/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-16T21:01:17,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741974_1150 (size=630) 2024-12-16T21:01:17,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741974_1150 (size=630) 2024-12-16T21:01:17,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741974_1150 (size=630) 2024-12-16T21:01:18,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741975_1151 (size=165) 2024-12-16T21:01:18,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741975_1151 (size=165) 2024-12-16T21:01:18,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741975_1151 (size=165) 2024-12-16T21:01:18,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:18,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:18,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:18,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:19,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-17321563893769918143.jar 2024-12-16T21:01:19,045 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:19,045 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:19,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For 
class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-12859563875690787394.jar 2024-12-16T21:01:19,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:19,127 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:19,127 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:19,127 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:19,127 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:19,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:19,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T21:01:19,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T21:01:19,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T21:01:19,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T21:01:19,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 
2024-12-16T21:01:19,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T21:01:19,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T21:01:19,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T21:01:19,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T21:01:19,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T21:01:19,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T21:01:19,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T21:01:19,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:01:19,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:01:19,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:01:19,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:01:19,132 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-16T21:01:19,132 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:01:19,132 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:01:19,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741976_1152 (size=127628) 2024-12-16T21:01:19,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741976_1152 (size=127628) 2024-12-16T21:01:19,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741976_1152 (size=127628) 2024-12-16T21:01:19,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741977_1153 (size=2172137) 2024-12-16T21:01:19,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741977_1153 (size=2172137) 2024-12-16T21:01:19,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741977_1153 (size=2172137) 2024-12-16T21:01:19,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741978_1154 (size=213228) 2024-12-16T21:01:19,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741978_1154 (size=213228) 2024-12-16T21:01:19,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741978_1154 (size=213228) 2024-12-16T21:01:19,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741979_1155 (size=1877034) 2024-12-16T21:01:19,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741979_1155 (size=1877034) 2024-12-16T21:01:19,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741979_1155 (size=1877034) 2024-12-16T21:01:19,282 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
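The long run of "TableMapReduceUtil(923): For class ..., using jar ..." entries above appears to be the export job resolving, for each class it depends on, the jar that provides it so those jars can be shipped to the MapReduce cluster alongside the job. A minimal sketch of that mechanism is shown below, assuming a standard Job setup; the job name and the explicitly pinned class are illustrative and not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-sketch");

    // Locates the jar backing each required class (HBase, ZooKeeper, protobuf,
    // metrics, OpenTelemetry, ...) and adds it to the job's distributed cache --
    // the resolution the "For class ..., using jar ..." lines above trace.
    TableMapReduceUtil.addDependencyJars(job);

    // Extra classes (and therefore their jars) can be pinned explicitly as well.
    TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
        org.apache.hadoop.hbase.client.Put.class);
  }
}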
2024-12-16T21:01:19,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741980_1156 (size=533455) 2024-12-16T21:01:19,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741980_1156 (size=533455) 2024-12-16T21:01:19,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741980_1156 (size=533455) 2024-12-16T21:01:19,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741981_1157 (size=7280644) 2024-12-16T21:01:19,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741981_1157 (size=7280644) 2024-12-16T21:01:19,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741981_1157 (size=7280644) 2024-12-16T21:01:19,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741982_1158 (size=451756) 2024-12-16T21:01:19,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741982_1158 (size=451756) 2024-12-16T21:01:19,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741982_1158 (size=451756) 2024-12-16T21:01:19,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741983_1159 (size=6350920) 2024-12-16T21:01:19,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741983_1159 (size=6350920) 2024-12-16T21:01:19,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741983_1159 (size=6350920) 2024-12-16T21:01:19,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741984_1160 (size=4188619) 2024-12-16T21:01:19,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741984_1160 (size=4188619) 2024-12-16T21:01:19,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741984_1160 (size=4188619) 2024-12-16T21:01:19,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741985_1161 (size=20406) 2024-12-16T21:01:19,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741985_1161 (size=20406) 2024-12-16T21:01:19,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741985_1161 (size=20406) 2024-12-16T21:01:19,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741986_1162 (size=75495) 2024-12-16T21:01:19,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741986_1162 
(size=75495) 2024-12-16T21:01:19,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741986_1162 (size=75495) 2024-12-16T21:01:19,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741987_1163 (size=45609) 2024-12-16T21:01:19,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741987_1163 (size=45609) 2024-12-16T21:01:19,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741987_1163 (size=45609) 2024-12-16T21:01:19,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741988_1164 (size=110084) 2024-12-16T21:01:19,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741988_1164 (size=110084) 2024-12-16T21:01:19,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741988_1164 (size=110084) 2024-12-16T21:01:19,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741989_1165 (size=1323991) 2024-12-16T21:01:19,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741989_1165 (size=1323991) 2024-12-16T21:01:19,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741989_1165 (size=1323991) 2024-12-16T21:01:19,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741990_1166 (size=23076) 2024-12-16T21:01:19,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741990_1166 (size=23076) 2024-12-16T21:01:19,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741990_1166 (size=23076) 2024-12-16T21:01:19,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741991_1167 (size=126803) 2024-12-16T21:01:19,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741991_1167 (size=126803) 2024-12-16T21:01:19,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741991_1167 (size=126803) 2024-12-16T21:01:19,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741992_1168 (size=322274) 2024-12-16T21:01:19,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741992_1168 (size=322274) 2024-12-16T21:01:19,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741992_1168 (size=322274) 2024-12-16T21:01:19,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741993_1169 
(size=1832290) 2024-12-16T21:01:19,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741993_1169 (size=1832290) 2024-12-16T21:01:19,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741993_1169 (size=1832290) 2024-12-16T21:01:19,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741994_1170 (size=30081) 2024-12-16T21:01:19,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741994_1170 (size=30081) 2024-12-16T21:01:19,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741994_1170 (size=30081) 2024-12-16T21:01:19,606 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0002_000001 (auth:SIMPLE) from 127.0.0.1:40858 2024-12-16T21:01:19,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741995_1171 (size=53616) 2024-12-16T21:01:19,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741995_1171 (size=53616) 2024-12-16T21:01:19,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741995_1171 (size=53616) 2024-12-16T21:01:19,615 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_2/usercache/jenkins/appcache/application_1734382828217_0002/container_1734382828217_0002_01_000001/launch_container.sh] 2024-12-16T21:01:19,615 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_2/usercache/jenkins/appcache/application_1734382828217_0002/container_1734382828217_0002_01_000001/container_tokens] 2024-12-16T21:01:19,615 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_2/usercache/jenkins/appcache/application_1734382828217_0002/container_1734382828217_0002_01_000001/sysfs] 2024-12-16T21:01:19,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741996_1172 (size=29229) 2024-12-16T21:01:19,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741996_1172 (size=29229) 2024-12-16T21:01:19,631 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741996_1172 (size=29229) 2024-12-16T21:01:19,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741997_1173 (size=169089) 2024-12-16T21:01:19,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741997_1173 (size=169089) 2024-12-16T21:01:19,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741997_1173 (size=169089) 2024-12-16T21:01:19,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741998_1174 (size=5175431) 2024-12-16T21:01:19,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741998_1174 (size=5175431) 2024-12-16T21:01:19,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741998_1174 (size=5175431) 2024-12-16T21:01:19,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741999_1175 (size=136454) 2024-12-16T21:01:19,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741999_1175 (size=136454) 2024-12-16T21:01:19,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741999_1175 (size=136454) 2024-12-16T21:01:19,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742000_1176 (size=907465) 2024-12-16T21:01:19,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742000_1176 (size=907465) 2024-12-16T21:01:19,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742000_1176 (size=907465) 2024-12-16T21:01:19,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742001_1177 (size=3317408) 2024-12-16T21:01:19,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742001_1177 (size=3317408) 2024-12-16T21:01:19,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742001_1177 (size=3317408) 2024-12-16T21:01:19,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742002_1178 (size=503880) 2024-12-16T21:01:19,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742002_1178 (size=503880) 2024-12-16T21:01:19,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742002_1178 (size=503880) 2024-12-16T21:01:19,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742003_1179 (size=4695811) 2024-12-16T21:01:19,752 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742003_1179 (size=4695811) 2024-12-16T21:01:19,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742003_1179 (size=4695811) 2024-12-16T21:01:19,754 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-16T21:01:19,757 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-16T21:01:19,759 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-16T21:01:19,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742004_1180 (size=344) 2024-12-16T21:01:19,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742004_1180 (size=344) 2024-12-16T21:01:19,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742004_1180 (size=344) 2024-12-16T21:01:19,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742005_1181 (size=15) 2024-12-16T21:01:19,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742005_1181 (size=15) 2024-12-16T21:01:19,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742005_1181 (size=15) 2024-12-16T21:01:19,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742006_1182 (size=304893) 2024-12-16T21:01:19,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742006_1182 (size=304893) 2024-12-16T21:01:19,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742006_1182 (size=304893) 2024-12-16T21:01:19,835 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:01:19,835 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T21:01:20,173 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0003_000001 (auth:SIMPLE) from 127.0.0.1:46844 2024-12-16T21:01:21,095 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:01:21,136 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-16T21:01:21,136 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-16T21:01:21,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-16T21:01:21,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-16T21:01:24,591 DEBUG [master/6b259cf15929:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1e0be67c5a2679df18447ddfabb4a8b1 changed from -1.0 to 0.0, refreshing cache 2024-12-16T21:01:24,601 DEBUG [master/6b259cf15929:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 5ff33ba35f8500425d07b6bfd353cd16 changed from -1.0 to 0.0, refreshing cache 2024-12-16T21:01:24,601 DEBUG [master/6b259cf15929:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region ffdc28263b845c2b16a120a0817fe8a5 changed from -1.0 to 0.0, refreshing cache 2024-12-16T21:01:24,601 DEBUG [master/6b259cf15929:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region d5d1cb8817001d1d23d072e13c7cd118 changed from -1.0 to 0.0, refreshing cache 2024-12-16T21:01:26,531 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0003_000001 (auth:SIMPLE) from 127.0.0.1:43798 2024-12-16T21:01:26,640 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:01:26,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742007_1183 (size=350567) 2024-12-16T21:01:26,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742007_1183 (size=350567) 2024-12-16T21:01:26,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742007_1183 (size=350567) 2024-12-16T21:01:28,778 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0003_000001 (auth:SIMPLE) from 127.0.0.1:40996 2024-12-16T21:01:33,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742008_1184 (size=8324) 2024-12-16T21:01:33,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742008_1184 (size=8324) 
2024-12-16T21:01:33,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742008_1184 (size=8324) 2024-12-16T21:01:33,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742009_1185 (size=5286) 2024-12-16T21:01:33,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742009_1185 (size=5286) 2024-12-16T21:01:33,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742009_1185 (size=5286) 2024-12-16T21:01:33,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742010_1186 (size=17422) 2024-12-16T21:01:33,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742010_1186 (size=17422) 2024-12-16T21:01:33,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742010_1186 (size=17422) 2024-12-16T21:01:33,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742011_1187 (size=465) 2024-12-16T21:01:33,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742011_1187 (size=465) 2024-12-16T21:01:33,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742011_1187 (size=465) 2024-12-16T21:01:33,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742012_1188 (size=17422) 2024-12-16T21:01:33,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742012_1188 (size=17422) 2024-12-16T21:01:33,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742012_1188 (size=17422) 2024-12-16T21:01:33,205 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_3/usercache/jenkins/appcache/application_1734382828217_0003/container_1734382828217_0003_01_000002/launch_container.sh] 2024-12-16T21:01:33,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742013_1189 (size=350567) 2024-12-16T21:01:33,205 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_3/usercache/jenkins/appcache/application_1734382828217_0003/container_1734382828217_0003_01_000002/container_tokens] 2024-12-16T21:01:33,205 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_3/usercache/jenkins/appcache/application_1734382828217_0003/container_1734382828217_0003_01_000002/sysfs] 2024-12-16T21:01:33,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742013_1189 (size=350567) 2024-12-16T21:01:33,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742013_1189 (size=350567) 2024-12-16T21:01:33,225 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0003_000001 (auth:SIMPLE) from 127.0.0.1:50862 2024-12-16T21:01:34,990 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T21:01:34,992 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T21:01:35,006 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemState 2024-12-16T21:01:35,006 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T21:01:35,007 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T21:01:35,007 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-16T21:01:35,007 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-16T21:01:35,007 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-16T21:01:35,007 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382877917/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382877917/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-16T21:01:35,008 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382877917/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-16T21:01:35,008 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382877917/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-16T21:01:35,016 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemState 2024-12-16T21:01:35,016 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-16T21:01:35,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=74, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-16T21:01:35,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-16T21:01:35,021 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382895020"}]},"ts":"1734382895020"} 2024-12-16T21:01:35,022 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-16T21:01:35,024 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-16T21:01:35,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-16T21:01:35,027 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1e0be67c5a2679df18447ddfabb4a8b1, UNASSIGN}, {pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d5d1cb8817001d1d23d072e13c7cd118, UNASSIGN}] 2024-12-16T21:01:35,027 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d5d1cb8817001d1d23d072e13c7cd118, UNASSIGN 2024-12-16T21:01:35,028 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1e0be67c5a2679df18447ddfabb4a8b1, UNASSIGN 2024-12-16T21:01:35,028 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=d5d1cb8817001d1d23d072e13c7cd118, regionState=CLOSING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:01:35,029 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=1e0be67c5a2679df18447ddfabb4a8b1, regionState=CLOSING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:01:35,029 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:01:35,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; CloseRegionProcedure d5d1cb8817001d1d23d072e13c7cd118, 
server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:01:35,030 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:01:35,030 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=76, state=RUNNABLE; CloseRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:01:35,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-16T21:01:35,181 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:01:35,182 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(124): Close d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:35,182 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:01:35,182 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1681): Closing d5d1cb8817001d1d23d072e13c7cd118, disabling compactions & flushes 2024-12-16T21:01:35,182 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:01:35,182 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 2024-12-16T21:01:35,182 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 2024-12-16T21:01:35,182 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. after waiting 0 ms 2024-12-16T21:01:35,182 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 2024-12-16T21:01:35,183 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(124): Close 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:35,183 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:01:35,183 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1681): Closing 1e0be67c5a2679df18447ddfabb4a8b1, disabling compactions & flushes 2024-12-16T21:01:35,183 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 
2024-12-16T21:01:35,183 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:35,183 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. after waiting 0 ms 2024-12-16T21:01:35,183 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 2024-12-16T21:01:35,188 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:01:35,188 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:01:35,189 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:01:35,189 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:01:35,189 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118. 2024-12-16T21:01:35,189 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1635): Region close journal for d5d1cb8817001d1d23d072e13c7cd118: 2024-12-16T21:01:35,189 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1. 
2024-12-16T21:01:35,189 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1635): Region close journal for 1e0be67c5a2679df18447ddfabb4a8b1: 2024-12-16T21:01:35,190 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(170): Closed d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:35,191 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=d5d1cb8817001d1d23d072e13c7cd118, regionState=CLOSED 2024-12-16T21:01:35,191 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(170): Closed 1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:35,191 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=1e0be67c5a2679df18447ddfabb4a8b1, regionState=CLOSED 2024-12-16T21:01:35,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-16T21:01:35,194 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=76 2024-12-16T21:01:35,194 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=76, state=SUCCESS; CloseRegionProcedure 1e0be67c5a2679df18447ddfabb4a8b1, server=6b259cf15929,41713,1734382821274 in 163 msec 2024-12-16T21:01:35,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; CloseRegionProcedure d5d1cb8817001d1d23d072e13c7cd118, server=6b259cf15929,42323,1734382821139 in 163 msec 2024-12-16T21:01:35,195 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d5d1cb8817001d1d23d072e13c7cd118, UNASSIGN in 168 msec 2024-12-16T21:01:35,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-16T21:01:35,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1e0be67c5a2679df18447ddfabb4a8b1, UNASSIGN in 168 msec 2024-12-16T21:01:35,197 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-12-16T21:01:35,197 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 171 msec 2024-12-16T21:01:35,198 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382895198"}]},"ts":"1734382895198"} 2024-12-16T21:01:35,199 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-16T21:01:35,201 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-16T21:01:35,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemState in 186 msec 2024-12-16T21:01:35,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-16T21:01:35,323 INFO 
[Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState, procId: 74 completed 2024-12-16T21:01:35,323 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-16T21:01:35,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T21:01:35,325 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T21:01:35,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-16T21:01:35,325 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=80, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T21:01:35,327 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-16T21:01:35,329 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:35,329 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:35,331 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/recovered.edits] 2024-12-16T21:01:35,331 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/recovered.edits] 2024-12-16T21:01:35,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T21:01:35,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T21:01:35,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T21:01:35,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T21:01:35,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-16T21:01:35,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-16T21:01:35,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-16T21:01:35,333 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-16T21:01:35,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T21:01:35,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T21:01:35,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T21:01:35,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:35,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:35,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T21:01:35,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:35,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:35,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-16T21:01:35,337 DEBUG 
[HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/cf/658c462438a44292a7985542bcaef2bb to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/cf/658c462438a44292a7985542bcaef2bb 2024-12-16T21:01:35,337 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/cf/9113c1a2b60649db9abc0e0bfc801972 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/cf/9113c1a2b60649db9abc0e0bfc801972 2024-12-16T21:01:35,342 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1/recovered.edits/9.seqid 2024-12-16T21:01:35,342 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118/recovered.edits/9.seqid 2024-12-16T21:01:35,342 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/1e0be67c5a2679df18447ddfabb4a8b1 2024-12-16T21:01:35,343 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemState/d5d1cb8817001d1d23d072e13c7cd118 2024-12-16T21:01:35,343 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-16T21:01:35,345 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=80, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T21:01:35,348 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-16T21:01:35,350 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-16T21:01:35,351 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=80, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T21:01:35,351 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemState' from region states. 
2024-12-16T21:01:35,351 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382895351"}]},"ts":"9223372036854775807"} 2024-12-16T21:01:35,351 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382895351"}]},"ts":"9223372036854775807"} 2024-12-16T21:01:35,353 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T21:01:35,353 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 1e0be67c5a2679df18447ddfabb4a8b1, NAME => 'testtb-testExportFileSystemState,,1734382876311.1e0be67c5a2679df18447ddfabb4a8b1.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => d5d1cb8817001d1d23d072e13c7cd118, NAME => 'testtb-testExportFileSystemState,1,1734382876311.d5d1cb8817001d1d23d072e13c7cd118.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T21:01:35,353 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-16T21:01:35,354 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734382895353"}]},"ts":"9223372036854775807"} 2024-12-16T21:01:35,355 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemState state from META 2024-12-16T21:01:35,358 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=80, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T21:01:35,358 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemState in 35 msec 2024-12-16T21:01:35,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-16T21:01:35,436 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState, procId: 80 completed 2024-12-16T21:01:35,449 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" 2024-12-16T21:01:35,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-16T21:01:35,453 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" 2024-12-16T21:01:35,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-16T21:01:35,485 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=795 (was 794) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:45662 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:34365 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:38455 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2662 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging 
thread: hconnection-0x70b78df7-shared-pool-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40833 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:34786 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_806406503_1 at /127.0.0.1:34752 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:41692 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 27912) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_806406503_1 at /127.0.0.1:45646 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34365 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=815 (was 813) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=477 (was 374) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 17), AvailableMemoryMB=3908 (was 4342) 2024-12-16T21:01:35,485 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-16T21:01:35,507 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=795, OpenFileDescriptor=815, MaxFileDescriptor=1048576, SystemLoadAverage=477, ProcessCount=17, AvailableMemoryMB=3907 2024-12-16T21:01:35,507 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-16T21:01:35,509 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:01:35,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-16T21:01:35,510 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:01:35,510 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:01:35,510 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 81 2024-12-16T21:01:35,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T21:01:35,512 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:01:35,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742014_1190 (size=404) 2024-12-16T21:01:35,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742014_1190 (size=404) 2024-12-16T21:01:35,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742014_1190 (size=404) 2024-12-16T21:01:35,525 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 599073b51484cc70afc3d858d8e43f45, NAME => 'testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:01:35,527 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => afae9793ed4462fba8dccd3f47ddac0f, NAME => 'testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:01:35,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742015_1191 (size=65) 2024-12-16T21:01:35,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742015_1191 (size=65) 2024-12-16T21:01:35,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742015_1191 (size=65) 2024-12-16T21:01:35,540 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:01:35,540 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1681): Closing 599073b51484cc70afc3d858d8e43f45, disabling compactions & flushes 2024-12-16T21:01:35,540 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 2024-12-16T21:01:35,540 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 2024-12-16T21:01:35,540 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. after waiting 0 ms 2024-12-16T21:01:35,540 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 2024-12-16T21:01:35,540 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 
2024-12-16T21:01:35,540 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1635): Region close journal for 599073b51484cc70afc3d858d8e43f45: 2024-12-16T21:01:35,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742016_1192 (size=65) 2024-12-16T21:01:35,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742016_1192 (size=65) 2024-12-16T21:01:35,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742016_1192 (size=65) 2024-12-16T21:01:35,545 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:01:35,545 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1681): Closing afae9793ed4462fba8dccd3f47ddac0f, disabling compactions & flushes 2024-12-16T21:01:35,545 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:01:35,545 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:01:35,545 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. after waiting 0 ms 2024-12-16T21:01:35,545 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:01:35,545 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 
2024-12-16T21:01:35,545 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1635): Region close journal for afae9793ed4462fba8dccd3f47ddac0f: 2024-12-16T21:01:35,546 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:01:35,547 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734382895547"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382895547"}]},"ts":"1734382895547"} 2024-12-16T21:01:35,547 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734382895547"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382895547"}]},"ts":"1734382895547"} 2024-12-16T21:01:35,552 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T21:01:35,553 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:01:35,553 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382895553"}]},"ts":"1734382895553"} 2024-12-16T21:01:35,554 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-16T21:01:35,561 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:01:35,563 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:01:35,563 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:01:35,563 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:01:35,563 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:01:35,563 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:01:35,563 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:01:35,563 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:01:35,563 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=599073b51484cc70afc3d858d8e43f45, ASSIGN}, {pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afae9793ed4462fba8dccd3f47ddac0f, ASSIGN}] 2024-12-16T21:01:35,564 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, 
region=afae9793ed4462fba8dccd3f47ddac0f, ASSIGN 2024-12-16T21:01:35,564 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=599073b51484cc70afc3d858d8e43f45, ASSIGN 2024-12-16T21:01:35,565 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afae9793ed4462fba8dccd3f47ddac0f, ASSIGN; state=OFFLINE, location=6b259cf15929,36861,1734382821216; forceNewPlan=false, retain=false 2024-12-16T21:01:35,565 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=599073b51484cc70afc3d858d8e43f45, ASSIGN; state=OFFLINE, location=6b259cf15929,41713,1734382821274; forceNewPlan=false, retain=false 2024-12-16T21:01:35,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T21:01:35,715 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T21:01:35,715 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=599073b51484cc70afc3d858d8e43f45, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:01:35,715 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=afae9793ed4462fba8dccd3f47ddac0f, regionState=OPENING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:01:35,717 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=82, state=RUNNABLE; OpenRegionProcedure 599073b51484cc70afc3d858d8e43f45, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:01:35,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=83, state=RUNNABLE; OpenRegionProcedure afae9793ed4462fba8dccd3f47ddac0f, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:01:35,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T21:01:35,869 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:01:35,869 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:01:35,872 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 2024-12-16T21:01:35,872 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 
2024-12-16T21:01:35,872 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7285): Opening region: {ENCODED => 599073b51484cc70afc3d858d8e43f45, NAME => 'testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T21:01:35,872 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7285): Opening region: {ENCODED => afae9793ed4462fba8dccd3f47ddac0f, NAME => 'testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T21:01:35,872 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. service=AccessControlService 2024-12-16T21:01:35,873 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. service=AccessControlService 2024-12-16T21:01:35,873 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:01:35,873 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:01:35,873 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:35,873 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:35,873 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:01:35,873 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:01:35,873 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7327): checking encryption for 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:35,873 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7327): checking encryption for afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:35,873 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7330): checking 
classloading for 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:35,873 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7330): checking classloading for afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:35,874 INFO [StoreOpener-afae9793ed4462fba8dccd3f47ddac0f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:35,874 INFO [StoreOpener-599073b51484cc70afc3d858d8e43f45-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:35,876 INFO [StoreOpener-afae9793ed4462fba8dccd3f47ddac0f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region afae9793ed4462fba8dccd3f47ddac0f columnFamilyName cf 2024-12-16T21:01:35,876 INFO [StoreOpener-599073b51484cc70afc3d858d8e43f45-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 599073b51484cc70afc3d858d8e43f45 columnFamilyName cf 2024-12-16T21:01:35,876 DEBUG [StoreOpener-599073b51484cc70afc3d858d8e43f45-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:01:35,876 DEBUG [StoreOpener-afae9793ed4462fba8dccd3f47ddac0f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:01:35,876 INFO [StoreOpener-afae9793ed4462fba8dccd3f47ddac0f-1 {}] regionserver.HStore(327): Store=afae9793ed4462fba8dccd3f47ddac0f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:01:35,876 INFO [StoreOpener-599073b51484cc70afc3d858d8e43f45-1 {}] regionserver.HStore(327): Store=599073b51484cc70afc3d858d8e43f45/cf, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:01:35,877 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:35,877 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:35,877 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:35,877 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:35,879 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1085): writing seq id for 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:35,879 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1085): writing seq id for afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:35,881 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:01:35,881 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:01:35,882 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1102): Opened 599073b51484cc70afc3d858d8e43f45; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69709451, jitterRate=0.038751766085624695}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:01:35,882 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1102): Opened afae9793ed4462fba8dccd3f47ddac0f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67195402, jitterRate=0.0012895166873931885}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:01:35,883 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] 
regionserver.HRegion(1001): Region open journal for 599073b51484cc70afc3d858d8e43f45: 2024-12-16T21:01:35,883 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1001): Region open journal for afae9793ed4462fba8dccd3f47ddac0f: 2024-12-16T21:01:35,883 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45., pid=84, masterSystemTime=1734382895869 2024-12-16T21:01:35,883 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f., pid=85, masterSystemTime=1734382895869 2024-12-16T21:01:35,885 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:01:35,885 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:01:35,885 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=afae9793ed4462fba8dccd3f47ddac0f, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:01:35,885 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 2024-12-16T21:01:35,885 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 
2024-12-16T21:01:35,886 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=599073b51484cc70afc3d858d8e43f45, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:01:35,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=83 2024-12-16T21:01:35,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=83, state=SUCCESS; OpenRegionProcedure afae9793ed4462fba8dccd3f47ddac0f, server=6b259cf15929,36861,1734382821216 in 168 msec 2024-12-16T21:01:35,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=82 2024-12-16T21:01:35,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=82, state=SUCCESS; OpenRegionProcedure 599073b51484cc70afc3d858d8e43f45, server=6b259cf15929,41713,1734382821274 in 170 msec 2024-12-16T21:01:35,889 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afae9793ed4462fba8dccd3f47ddac0f, ASSIGN in 325 msec 2024-12-16T21:01:35,890 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-16T21:01:35,890 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=599073b51484cc70afc3d858d8e43f45, ASSIGN in 326 msec 2024-12-16T21:01:35,891 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:01:35,891 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382895891"}]},"ts":"1734382895891"} 2024-12-16T21:01:35,892 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-16T21:01:35,973 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:01:35,974 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-16T21:01:35,984 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-16T21:01:36,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:36,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:36,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:36,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:01:36,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T21:01:36,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:36,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:36,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:36,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-16T21:01:36,181 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; CreateTableProcedure table=testtb-testConsecutiveExports in 671 msec 2024-12-16T21:01:36,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T21:01:36,615 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports, procId: 81 completed 2024-12-16T21:01:36,616 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-16T21:01:36,616 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:01:36,619 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-16T21:01:36,619 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:01:36,619 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testConsecutiveExports assigned. 2024-12-16T21:01:36,622 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-16T21:01:36,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382896622 (current time:1734382896622). 
2024-12-16T21:01:36,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:01:36,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-16T21:01:36,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:01:36,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1ce2288b to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c6299cf 2024-12-16T21:01:36,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e75ebd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:01:36,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:36,652 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44002, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:36,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1ce2288b to 127.0.0.1:61742 2024-12-16T21:01:36,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:01:36,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x740b0586 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@564d4069 2024-12-16T21:01:36,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27970212, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:01:36,713 DEBUG [hconnection-0x3cf4cbf4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:36,714 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:36,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:36,716 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54136, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:36,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x740b0586 to 127.0.0.1:61742 2024-12-16T21:01:36,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:01:36,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-16T21:01:36,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:01:36,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-16T21:01:36,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-16T21:01:36,720 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:01:36,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-16T21:01:36,721 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:01:36,723 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:01:36,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742017_1193 (size=161) 2024-12-16T21:01:36,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742017_1193 (size=161) 2024-12-16T21:01:36,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742017_1193 (size=161) 2024-12-16T21:01:36,731 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:01:36,732 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 599073b51484cc70afc3d858d8e43f45}, {pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure afae9793ed4462fba8dccd3f47ddac0f}] 
2024-12-16T21:01:36,732 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:36,732 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:36,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-16T21:01:36,883 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:01:36,883 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:01:36,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=88 2024-12-16T21:01:36,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=87 2024-12-16T21:01:36,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 2024-12-16T21:01:36,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:01:36,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for afae9793ed4462fba8dccd3f47ddac0f: 2024-12-16T21:01:36,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.HRegion(2538): Flush status journal for 599073b51484cc70afc3d858d8e43f45: 2024-12-16T21:01:36,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. for emptySnaptb0-testConsecutiveExports completed. 2024-12-16T21:01:36,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. for emptySnaptb0-testConsecutiveExports completed. 2024-12-16T21:01:36,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-16T21:01:36,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-16T21:01:36,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:01:36,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:01:36,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:01:36,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:01:36,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742019_1195 (size=68) 2024-12-16T21:01:36,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742019_1195 (size=68) 2024-12-16T21:01:36,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742019_1195 (size=68) 2024-12-16T21:01:36,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742018_1194 (size=68) 2024-12-16T21:01:36,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742018_1194 (size=68) 2024-12-16T21:01:36,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742018_1194 (size=68) 2024-12-16T21:01:36,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 
2024-12-16T21:01:36,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=87 2024-12-16T21:01:36,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=87 2024-12-16T21:01:36,896 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:36,897 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:36,898 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; SnapshotRegionProcedure 599073b51484cc70afc3d858d8e43f45 in 165 msec 2024-12-16T21:01:37,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-16T21:01:37,296 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:01:37,297 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-16T21:01:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-16T21:01:37,298 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:37,298 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:37,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=86 2024-12-16T21:01:37,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=86, state=SUCCESS; SnapshotRegionProcedure afae9793ed4462fba8dccd3f47ddac0f in 567 msec 2024-12-16T21:01:37,301 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:01:37,302 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:01:37,303 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH 
ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:01:37,303 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-16T21:01:37,304 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-16T21:01:37,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742020_1196 (size=543) 2024-12-16T21:01:37,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742020_1196 (size=543) 2024-12-16T21:01:37,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742020_1196 (size=543) 2024-12-16T21:01:37,316 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:01:37,321 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:01:37,321 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-16T21:01:37,322 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:01:37,322 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-16T21:01:37,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-16T21:01:37,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 604 msec 2024-12-16T21:01:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-16T21:01:37,825 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 86 completed 2024-12-16T21:01:37,832 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41713 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:01:37,833 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:01:37,838 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testConsecutiveExports 2024-12-16T21:01:37,838 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 2024-12-16T21:01:37,838 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:01:37,850 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-16T21:01:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382897850 (current time:1734382897850). 2024-12-16T21:01:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:01:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-16T21:01:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:01:37,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x33158ab6 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@29244c4b 2024-12-16T21:01:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47c346a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:01:37,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:37,857 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44022, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x33158ab6 to 127.0.0.1:61742 2024-12-16T21:01:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:01:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x1804ce2a to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@13c98f75 2024-12-16T21:01:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e623dea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:01:37,865 DEBUG [hconnection-0x14c11127-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:37,865 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44028, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:37,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:01:37,868 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54148, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:01:37,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1804ce2a to 127.0.0.1:61742 2024-12-16T21:01:37,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:01:37,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-16T21:01:37,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-16T21:01:37,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-16T21:01:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-16T21:01:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-16T21:01:37,874 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:01:37,875 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:01:37,877 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:01:37,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742021_1197 (size=156) 2024-12-16T21:01:37,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742021_1197 (size=156) 2024-12-16T21:01:37,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742021_1197 (size=156) 2024-12-16T21:01:37,887 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:01:37,887 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 599073b51484cc70afc3d858d8e43f45}, {pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure afae9793ed4462fba8dccd3f47ddac0f}] 2024-12-16T21:01:37,888 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:37,888 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:37,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 
2024-12-16T21:01:38,039 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:01:38,039 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:01:38,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=90 2024-12-16T21:01:38,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=91 2024-12-16T21:01:38,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 2024-12-16T21:01:38,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:01:38,040 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 599073b51484cc70afc3d858d8e43f45 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-16T21:01:38,040 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing afae9793ed4462fba8dccd3f47ddac0f 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-16T21:01:38,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/.tmp/cf/bb68e884abb74089b2f21831485f57e6 is 71, key is 03bc97be2c0eb61da0d937a041b53d5f/cf:q/1734382897832/Put/seqid=0 2024-12-16T21:01:38,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/.tmp/cf/a2ed60c07042483190e5aa9e1a98fe63 is 71, key is 136db931c43ad435cad953a88797523d/cf:q/1734382897833/Put/seqid=0 2024-12-16T21:01:38,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742022_1198 (size=5288) 2024-12-16T21:01:38,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742022_1198 (size=5288) 2024-12-16T21:01:38,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742022_1198 (size=5288) 2024-12-16T21:01:38,070 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/.tmp/cf/bb68e884abb74089b2f21831485f57e6 2024-12-16T21:01:38,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742023_1199 (size=8326) 2024-12-16T21:01:38,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742023_1199 (size=8326) 2024-12-16T21:01:38,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742023_1199 (size=8326) 2024-12-16T21:01:38,072 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/.tmp/cf/a2ed60c07042483190e5aa9e1a98fe63 2024-12-16T21:01:38,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/.tmp/cf/bb68e884abb74089b2f21831485f57e6 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/cf/bb68e884abb74089b2f21831485f57e6 2024-12-16T21:01:38,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/.tmp/cf/a2ed60c07042483190e5aa9e1a98fe63 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/cf/a2ed60c07042483190e5aa9e1a98fe63 2024-12-16T21:01:38,083 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/cf/bb68e884abb74089b2f21831485f57e6, entries=3, sequenceid=6, filesize=5.2 K 2024-12-16T21:01:38,083 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/cf/a2ed60c07042483190e5aa9e1a98fe63, entries=47, sequenceid=6, filesize=8.1 K 2024-12-16T21:01:38,084 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 599073b51484cc70afc3d858d8e43f45 in 44ms, sequenceid=6, compaction requested=false 2024-12-16T21:01:38,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-16T21:01:38,084 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for afae9793ed4462fba8dccd3f47ddac0f in 44ms, sequenceid=6, compaction requested=false 2024-12-16T21:01:38,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-16T21:01:38,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 599073b51484cc70afc3d858d8e43f45: 2024-12-16T21:01:38,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for afae9793ed4462fba8dccd3f47ddac0f: 2024-12-16T21:01:38,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. for snaptb0-testConsecutiveExports completed. 2024-12-16T21:01:38,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. for snaptb0-testConsecutiveExports completed. 2024-12-16T21:01:38,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-16T21:01:38,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:01:38,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/cf/bb68e884abb74089b2f21831485f57e6] hfiles 2024-12-16T21:01:38,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-16T21:01:38,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/cf/bb68e884abb74089b2f21831485f57e6 for snapshot=snaptb0-testConsecutiveExports 2024-12-16T21:01:38,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:01:38,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/cf/a2ed60c07042483190e5aa9e1a98fe63] hfiles 2024-12-16T21:01:38,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/cf/a2ed60c07042483190e5aa9e1a98fe63 for snapshot=snaptb0-testConsecutiveExports 2024-12-16T21:01:38,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742025_1201 (size=107) 2024-12-16T21:01:38,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742025_1201 (size=107) 2024-12-16T21:01:38,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742025_1201 (size=107) 2024-12-16T21:01:38,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742024_1200 (size=107) 2024-12-16T21:01:38,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742024_1200 (size=107) 2024-12-16T21:01:38,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742024_1200 (size=107) 2024-12-16T21:01:38,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:01:38,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 
2024-12-16T21:01:38,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91 2024-12-16T21:01:38,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-16T21:01:38,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-16T21:01:38,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=91 2024-12-16T21:01:38,093 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:38,093 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:38,093 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:01:38,093 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:01:38,095 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=89, state=SUCCESS; SnapshotRegionProcedure afae9793ed4462fba8dccd3f47ddac0f in 207 msec 2024-12-16T21:01:38,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-16T21:01:38,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; SnapshotRegionProcedure 599073b51484cc70afc3d858d8e43f45 in 207 msec 2024-12-16T21:01:38,095 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:01:38,096 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:01:38,097 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:01:38,097 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-16T21:01:38,097 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-16T21:01:38,105 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742026_1202 (size=621) 2024-12-16T21:01:38,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742026_1202 (size=621) 2024-12-16T21:01:38,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742026_1202 (size=621) 2024-12-16T21:01:38,107 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:01:38,112 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:01:38,112 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-16T21:01:38,113 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:01:38,113 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-16T21:01:38,114 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 243 msec 2024-12-16T21:01:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-16T21:01:38,174 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 89 completed 2024-12-16T21:01:38,174 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174 2024-12-16T21:01:38,174 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174, 
srcFsUri=hdfs://localhost:37263, srcDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:01:38,214 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37263, inputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:01:38,214 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@383a8f10, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-16T21:01:38,217 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T21:01:38,221 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-16T21:01:38,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:38,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:38,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:38,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:39,308 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0003_000001 (auth:SIMPLE) from 127.0.0.1:41698 2024-12-16T21:01:39,318 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_0/usercache/jenkins/appcache/application_1734382828217_0003/container_1734382828217_0003_01_000001/launch_container.sh] 2024-12-16T21:01:39,319 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_0/usercache/jenkins/appcache/application_1734382828217_0003/container_1734382828217_0003_01_000001/container_tokens] 2024-12-16T21:01:39,319 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_0/usercache/jenkins/appcache/application_1734382828217_0003/container_1734382828217_0003_01_000001/sysfs] 2024-12-16T21:01:39,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-17995529816948019349.jar 2024-12-16T21:01:39,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:39,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:39,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-4081980042997230453.jar 2024-12-16T21:01:39,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:39,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:39,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:39,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:39,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:39,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:39,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T21:01:39,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T21:01:39,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T21:01:39,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T21:01:39,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T21:01:39,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T21:01:39,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T21:01:39,407 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T21:01:39,407 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T21:01:39,407 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T21:01:39,407 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T21:01:39,407 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T21:01:39,408 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:01:39,408 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:01:39,408 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:01:39,408 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:01:39,409 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:01:39,409 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:01:39,409 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:01:39,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742027_1203 (size=127628) 2024-12-16T21:01:39,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742027_1203 (size=127628) 2024-12-16T21:01:39,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742027_1203 (size=127628) 2024-12-16T21:01:39,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742028_1204 (size=2172137) 2024-12-16T21:01:39,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742028_1204 (size=2172137) 2024-12-16T21:01:39,498 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742028_1204 (size=2172137) 2024-12-16T21:01:39,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742029_1205 (size=213228) 2024-12-16T21:01:39,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742029_1205 (size=213228) 2024-12-16T21:01:39,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742029_1205 (size=213228) 2024-12-16T21:01:39,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742030_1206 (size=1877034) 2024-12-16T21:01:39,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742030_1206 (size=1877034) 2024-12-16T21:01:39,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742030_1206 (size=1877034) 2024-12-16T21:01:39,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742031_1207 (size=533455) 2024-12-16T21:01:39,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742031_1207 (size=533455) 2024-12-16T21:01:39,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742031_1207 (size=533455) 2024-12-16T21:01:39,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742032_1208 (size=7280644) 2024-12-16T21:01:39,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742032_1208 (size=7280644) 2024-12-16T21:01:39,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742032_1208 (size=7280644) 2024-12-16T21:01:39,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742033_1209 (size=4188619) 2024-12-16T21:01:39,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742033_1209 (size=4188619) 2024-12-16T21:01:39,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742033_1209 (size=4188619) 2024-12-16T21:01:39,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742034_1210 (size=20406) 2024-12-16T21:01:39,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742034_1210 (size=20406) 2024-12-16T21:01:39,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742034_1210 (size=20406) 2024-12-16T21:01:39,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742035_1211 (size=75495) 2024-12-16T21:01:39,603 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742035_1211 (size=75495) 2024-12-16T21:01:39,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742035_1211 (size=75495) 2024-12-16T21:01:39,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742036_1212 (size=45609) 2024-12-16T21:01:39,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742036_1212 (size=45609) 2024-12-16T21:01:39,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742036_1212 (size=45609) 2024-12-16T21:01:39,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742037_1213 (size=110084) 2024-12-16T21:01:39,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742037_1213 (size=110084) 2024-12-16T21:01:39,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742037_1213 (size=110084) 2024-12-16T21:01:39,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742038_1214 (size=1323991) 2024-12-16T21:01:39,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742038_1214 (size=1323991) 2024-12-16T21:01:39,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742038_1214 (size=1323991) 2024-12-16T21:01:39,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742039_1215 (size=23076) 2024-12-16T21:01:39,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742039_1215 (size=23076) 2024-12-16T21:01:39,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742039_1215 (size=23076) 2024-12-16T21:01:39,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742040_1216 (size=451756) 2024-12-16T21:01:39,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742040_1216 (size=451756) 2024-12-16T21:01:39,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742040_1216 (size=451756) 2024-12-16T21:01:39,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742041_1217 (size=126803) 2024-12-16T21:01:39,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742041_1217 (size=126803) 2024-12-16T21:01:39,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742041_1217 (size=126803) 2024-12-16T21:01:39,708 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742042_1218 (size=322274) 2024-12-16T21:01:39,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742042_1218 (size=322274) 2024-12-16T21:01:39,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742042_1218 (size=322274) 2024-12-16T21:01:39,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742043_1219 (size=1832290) 2024-12-16T21:01:39,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742043_1219 (size=1832290) 2024-12-16T21:01:39,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742043_1219 (size=1832290) 2024-12-16T21:01:39,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742044_1220 (size=30081) 2024-12-16T21:01:39,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742044_1220 (size=30081) 2024-12-16T21:01:39,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742044_1220 (size=30081) 2024-12-16T21:01:39,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742045_1221 (size=53616) 2024-12-16T21:01:39,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742045_1221 (size=53616) 2024-12-16T21:01:39,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742045_1221 (size=53616) 2024-12-16T21:01:39,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742046_1222 (size=29229) 2024-12-16T21:01:39,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742046_1222 (size=29229) 2024-12-16T21:01:39,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742046_1222 (size=29229) 2024-12-16T21:01:39,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742047_1223 (size=169089) 2024-12-16T21:01:39,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742047_1223 (size=169089) 2024-12-16T21:01:39,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742047_1223 (size=169089) 2024-12-16T21:01:39,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742048_1224 (size=6350920) 2024-12-16T21:01:39,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742048_1224 (size=6350920) 2024-12-16T21:01:39,828 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742048_1224 (size=6350920) 2024-12-16T21:01:39,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742049_1225 (size=5175431) 2024-12-16T21:01:39,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742049_1225 (size=5175431) 2024-12-16T21:01:39,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742049_1225 (size=5175431) 2024-12-16T21:01:39,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742050_1226 (size=136454) 2024-12-16T21:01:39,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742050_1226 (size=136454) 2024-12-16T21:01:39,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742050_1226 (size=136454) 2024-12-16T21:01:39,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742051_1227 (size=907465) 2024-12-16T21:01:39,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742051_1227 (size=907465) 2024-12-16T21:01:39,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742051_1227 (size=907465) 2024-12-16T21:01:39,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742052_1228 (size=3317408) 2024-12-16T21:01:39,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742052_1228 (size=3317408) 2024-12-16T21:01:39,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742052_1228 (size=3317408) 2024-12-16T21:01:39,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742053_1229 (size=503880) 2024-12-16T21:01:39,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742053_1229 (size=503880) 2024-12-16T21:01:39,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742053_1229 (size=503880) 2024-12-16T21:01:39,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742054_1230 (size=4695811) 2024-12-16T21:01:39,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742054_1230 (size=4695811) 2024-12-16T21:01:39,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742054_1230 (size=4695811) 2024-12-16T21:01:39,992 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
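[Editor's note: the JobResourceUploader warning just above ("No job jar file set. User classes may not be found.") and the long run of TableMapReduceUtil "For class X, using jar Y" DEBUG lines both concern how the export job ships its classes to the cluster. A minimal sketch of the two standard calls involved, assuming a hypothetical driver class JobJarSketch rather than the test's own code:]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

// Hypothetical driver class, for illustration only.
public class JobJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-sketch");

    // Setting the job jar from a class packaged in the driver jar avoids the
    // "No job jar file set" warning from JobResourceUploader;
    // job.setJar("/path/to/driver.jar") is the explicit-path form.
    job.setJarByClass(JobJarSketch.class);

    // addDependencyJars resolves the jar containing each class the job needs
    // (the "For class X, using jar Y" DEBUG lines above) and adds those jars
    // to the job's distributed cache so the mappers can load them.
    TableMapReduceUtil.addDependencyJars(job);
  }
}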
2024-12-16T21:01:39,995 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-16T21:01:39,997 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-16T21:01:40,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742055_1231 (size=338) 2024-12-16T21:01:40,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742055_1231 (size=338) 2024-12-16T21:01:40,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742055_1231 (size=338) 2024-12-16T21:01:40,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742056_1232 (size=15) 2024-12-16T21:01:40,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742056_1232 (size=15) 2024-12-16T21:01:40,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742056_1232 (size=15) 2024-12-16T21:01:40,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742057_1233 (size=304932) 2024-12-16T21:01:40,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742057_1233 (size=304932) 2024-12-16T21:01:40,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742057_1233 (size=304932) 2024-12-16T21:01:40,065 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:01:40,065 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T21:01:40,192 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0004_000001 (auth:SIMPLE) from 127.0.0.1:50876 2024-12-16T21:01:40,728 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:01:40,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-16T21:01:40,872 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-16T21:01:40,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-16T21:01:46,280 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0004_000001 (auth:SIMPLE) from 127.0.0.1:46168 2024-12-16T21:01:46,375 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:01:46,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742058_1234 (size=350606) 2024-12-16T21:01:46,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742058_1234 (size=350606) 2024-12-16T21:01:46,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742058_1234 (size=350606) 2024-12-16T21:01:48,586 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0004_000001 (auth:SIMPLE) from 127.0.0.1:44784 2024-12-16T21:01:49,282 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
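[Editor's note: for orientation, the operation being exercised here (snapshot snaptb0-testConsecutiveExports copied to a local file:/// destination, then exported again to the same target) corresponds to driving ExportSnapshot as a Hadoop Tool. A hedged sketch only: the destination path is illustrative, and whether a second run against the same target needs an -overwrite flag depends on the target state.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical driver, for illustration only.
public class ConsecutiveExportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String[] exportArgs = {
      "-snapshot", "snaptb0-testConsecutiveExports",
      // Illustrative destination; the test uses a per-run local-export-<timestamp> dir.
      "-copy-to", "file:///tmp/local-export-example"
    };
    // First export: copies the snapshot manifest, then runs a MapReduce job
    // to copy the referenced hfiles to the target filesystem.
    int rc1 = ToolRunner.run(conf, new ExportSnapshot(), exportArgs);
    // Second export against the same target exercises the "consecutive exports" path.
    int rc2 = ToolRunner.run(conf, new ExportSnapshot(), exportArgs);
    System.exit(rc1 != 0 ? rc1 : rc2);
  }
}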
2024-12-16T21:01:51,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742059_1235 (size=17447) 2024-12-16T21:01:51,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742059_1235 (size=17447) 2024-12-16T21:01:51,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742059_1235 (size=17447) 2024-12-16T21:01:51,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742060_1236 (size=462) 2024-12-16T21:01:51,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742060_1236 (size=462) 2024-12-16T21:01:51,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742060_1236 (size=462) 2024-12-16T21:01:52,030 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_3/usercache/jenkins/appcache/application_1734382828217_0004/container_1734382828217_0004_01_000002/launch_container.sh] 2024-12-16T21:01:52,030 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_3/usercache/jenkins/appcache/application_1734382828217_0004/container_1734382828217_0004_01_000002/container_tokens] 2024-12-16T21:01:52,031 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_3/usercache/jenkins/appcache/application_1734382828217_0004/container_1734382828217_0004_01_000002/sysfs] 2024-12-16T21:01:52,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742061_1237 (size=17447) 2024-12-16T21:01:52,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742061_1237 (size=17447) 2024-12-16T21:01:52,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742061_1237 (size=17447) 2024-12-16T21:01:52,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742062_1238 (size=350606) 2024-12-16T21:01:52,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742062_1238 (size=350606) 2024-12-16T21:01:52,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742062_1238 (size=350606) 2024-12-16T21:01:52,068 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0004_000001 (auth:SIMPLE) from 127.0.0.1:60682 2024-12-16T21:01:53,225 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T21:01:53,225 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T21:01:53,228 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-16T21:01:53,228 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T21:01:53,228 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T21:01:53,228 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-16T21:01:53,229 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-16T21:01:53,229 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-16T21:01:53,229 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@383a8f10 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-16T21:01:53,229 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-16T21:01:53,230 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-16T21:01:53,232 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174, srcFsUri=hdfs://localhost:37263, 
srcDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:01:53,270 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37263, inputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:01:53,270 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@383a8f10, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-16T21:01:53,272 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T21:01:53,277 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-16T21:01:53,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:53,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:53,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:53,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:54,343 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-7552355407946770291.jar 2024-12-16T21:01:54,344 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:54,344 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:54,411 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-12186384231238305568.jar 2024-12-16T21:01:54,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:54,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:54,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:54,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:54,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:54,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T21:01:54,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T21:01:54,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T21:01:54,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T21:01:54,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T21:01:54,414 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T21:01:54,414 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T21:01:54,414 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T21:01:54,414 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T21:01:54,414 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T21:01:54,415 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T21:01:54,415 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T21:01:54,415 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T21:01:54,415 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:01:54,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:01:54,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:01:54,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:01:54,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:01:54,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:01:54,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:01:54,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742063_1239 (size=127628) 2024-12-16T21:01:54,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742063_1239 (size=127628) 2024-12-16T21:01:54,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742063_1239 (size=127628) 2024-12-16T21:01:54,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742064_1240 (size=2172137) 2024-12-16T21:01:54,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742064_1240 (size=2172137) 2024-12-16T21:01:54,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742064_1240 (size=2172137) 2024-12-16T21:01:54,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742065_1241 (size=213228) 2024-12-16T21:01:54,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742065_1241 (size=213228) 2024-12-16T21:01:54,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742065_1241 (size=213228) 2024-12-16T21:01:54,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742066_1242 (size=1877034) 2024-12-16T21:01:54,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742066_1242 (size=1877034) 2024-12-16T21:01:54,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742066_1242 (size=1877034) 2024-12-16T21:01:54,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742067_1243 (size=533455) 2024-12-16T21:01:54,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742067_1243 (size=533455) 2024-12-16T21:01:54,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742067_1243 (size=533455) 2024-12-16T21:01:54,545 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742068_1244 (size=7280644) 2024-12-16T21:01:54,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742068_1244 (size=7280644) 2024-12-16T21:01:54,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742068_1244 (size=7280644) 2024-12-16T21:01:54,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742069_1245 (size=4188619) 2024-12-16T21:01:54,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742069_1245 (size=4188619) 2024-12-16T21:01:54,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742069_1245 (size=4188619) 2024-12-16T21:01:54,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742070_1246 (size=20406) 2024-12-16T21:01:54,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742070_1246 (size=20406) 2024-12-16T21:01:54,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742070_1246 (size=20406) 2024-12-16T21:01:54,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742071_1247 (size=75495) 2024-12-16T21:01:54,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742071_1247 (size=75495) 2024-12-16T21:01:54,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742071_1247 (size=75495) 2024-12-16T21:01:54,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742072_1248 (size=45609) 2024-12-16T21:01:54,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742072_1248 (size=45609) 2024-12-16T21:01:54,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742072_1248 (size=45609) 2024-12-16T21:01:54,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742073_1249 (size=110084) 2024-12-16T21:01:54,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742073_1249 (size=110084) 2024-12-16T21:01:54,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742073_1249 (size=110084) 2024-12-16T21:01:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742074_1250 (size=1323991) 2024-12-16T21:01:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742074_1250 (size=1323991) 2024-12-16T21:01:54,671 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742074_1250 (size=1323991) 2024-12-16T21:01:54,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742075_1251 (size=23076) 2024-12-16T21:01:54,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742075_1251 (size=23076) 2024-12-16T21:01:54,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742075_1251 (size=23076) 2024-12-16T21:01:54,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742076_1252 (size=126803) 2024-12-16T21:01:54,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742076_1252 (size=126803) 2024-12-16T21:01:54,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742076_1252 (size=126803) 2024-12-16T21:01:54,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742077_1253 (size=322274) 2024-12-16T21:01:54,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742077_1253 (size=322274) 2024-12-16T21:01:54,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742077_1253 (size=322274) 2024-12-16T21:01:54,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742078_1254 (size=1832290) 2024-12-16T21:01:54,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742078_1254 (size=1832290) 2024-12-16T21:01:54,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742078_1254 (size=1832290) 2024-12-16T21:01:54,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742079_1255 (size=30081) 2024-12-16T21:01:54,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742079_1255 (size=30081) 2024-12-16T21:01:54,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742079_1255 (size=30081) 2024-12-16T21:01:54,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742080_1256 (size=53616) 2024-12-16T21:01:54,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742080_1256 (size=53616) 2024-12-16T21:01:54,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742080_1256 (size=53616) 2024-12-16T21:01:54,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742081_1257 (size=29229) 2024-12-16T21:01:54,745 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742081_1257 (size=29229) 2024-12-16T21:01:54,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742081_1257 (size=29229) 2024-12-16T21:01:54,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742082_1258 (size=169089) 2024-12-16T21:01:54,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742082_1258 (size=169089) 2024-12-16T21:01:54,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742082_1258 (size=169089) 2024-12-16T21:01:54,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742083_1259 (size=6350920) 2024-12-16T21:01:54,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742083_1259 (size=6350920) 2024-12-16T21:01:54,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742083_1259 (size=6350920) 2024-12-16T21:01:54,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742084_1260 (size=5175431) 2024-12-16T21:01:54,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742084_1260 (size=5175431) 2024-12-16T21:01:54,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742084_1260 (size=5175431) 2024-12-16T21:01:54,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742085_1261 (size=136454) 2024-12-16T21:01:54,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742085_1261 (size=136454) 2024-12-16T21:01:54,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742085_1261 (size=136454) 2024-12-16T21:01:54,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742086_1262 (size=907465) 2024-12-16T21:01:54,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742086_1262 (size=907465) 2024-12-16T21:01:54,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742086_1262 (size=907465) 2024-12-16T21:01:54,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742087_1263 (size=3317408) 2024-12-16T21:01:54,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742087_1263 (size=3317408) 2024-12-16T21:01:54,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742087_1263 (size=3317408) 2024-12-16T21:01:54,853 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742088_1264 (size=503880) 2024-12-16T21:01:54,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742088_1264 (size=503880) 2024-12-16T21:01:54,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742088_1264 (size=503880) 2024-12-16T21:01:54,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742089_1265 (size=451756) 2024-12-16T21:01:54,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742089_1265 (size=451756) 2024-12-16T21:01:54,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742089_1265 (size=451756) 2024-12-16T21:01:54,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742090_1266 (size=4695811) 2024-12-16T21:01:54,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742090_1266 (size=4695811) 2024-12-16T21:01:54,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742090_1266 (size=4695811) 2024-12-16T21:01:54,882 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-16T21:01:54,884 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-16T21:01:54,885 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-16T21:01:54,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742091_1267 (size=338) 2024-12-16T21:01:54,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742091_1267 (size=338) 2024-12-16T21:01:54,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742091_1267 (size=338) 2024-12-16T21:01:54,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742092_1268 (size=15) 2024-12-16T21:01:54,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742092_1268 (size=15) 2024-12-16T21:01:54,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742092_1268 (size=15) 2024-12-16T21:01:54,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742093_1269 (size=304932) 2024-12-16T21:01:54,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742093_1269 (size=304932) 2024-12-16T21:01:54,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to 
blk_1073742093_1269 (size=304932) 2024-12-16T21:01:58,140 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:01:58,140 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:01:58,144 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0004_000001 (auth:SIMPLE) from 127.0.0.1:60698 2024-12-16T21:01:58,155 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0004/container_1734382828217_0004_01_000001/launch_container.sh] 2024-12-16T21:01:58,155 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0004/container_1734382828217_0004_01_000001/container_tokens] 2024-12-16T21:01:58,155 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0004/container_1734382828217_0004_01_000001/sysfs] 2024-12-16T21:01:58,316 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0005_000001 (auth:SIMPLE) from 127.0.0.1:59764 2024-12-16T21:02:04,826 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0005_000001 (auth:SIMPLE) from 127.0.0.1:36486 2024-12-16T21:02:05,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742094_1270 (size=350606) 2024-12-16T21:02:05,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742094_1270 (size=350606) 2024-12-16T21:02:05,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742094_1270 (size=350606) 2024-12-16T21:02:07,068 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0005_000001 (auth:SIMPLE) from 127.0.0.1:49878 2024-12-16T21:02:11,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742095_1271 
(size=16925) 2024-12-16T21:02:11,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742095_1271 (size=16925) 2024-12-16T21:02:11,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742095_1271 (size=16925) 2024-12-16T21:02:11,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742096_1272 (size=462) 2024-12-16T21:02:11,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742096_1272 (size=462) 2024-12-16T21:02:11,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742096_1272 (size=462) 2024-12-16T21:02:11,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742097_1273 (size=16925) 2024-12-16T21:02:11,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742097_1273 (size=16925) 2024-12-16T21:02:11,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742097_1273 (size=16925) 2024-12-16T21:02:11,497 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_0/usercache/jenkins/appcache/application_1734382828217_0005/container_1734382828217_0005_01_000002/launch_container.sh] 2024-12-16T21:02:11,497 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_0/usercache/jenkins/appcache/application_1734382828217_0005/container_1734382828217_0005_01_000002/container_tokens] 2024-12-16T21:02:11,497 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_0/usercache/jenkins/appcache/application_1734382828217_0005/container_1734382828217_0005_01_000002/sysfs] 2024-12-16T21:02:11,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742098_1274 (size=350606) 2024-12-16T21:02:11,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742098_1274 (size=350606) 2024-12-16T21:02:11,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742098_1274 (size=350606) 2024-12-16T21:02:11,516 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0005_000001 (auth:SIMPLE) from 127.0.0.1:47458 2024-12-16T21:02:13,291 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T21:02:13,292 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T21:02:13,294 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-16T21:02:13,294 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T21:02:13,295 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T21:02:13,295 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-16T21:02:13,296 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-16T21:02:13,296 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-16T21:02:13,296 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@383a8f10 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-16T21:02:13,297 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-16T21:02:13,297 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382898174/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-16T21:02:13,319 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testConsecutiveExports 2024-12-16T21:02:13,320 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-16T21:02:13,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-16T21:02:13,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=92 2024-12-16T21:02:13,323 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382933323"}]},"ts":"1734382933323"} 2024-12-16T21:02:13,324 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-16T21:02:13,326 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-16T21:02:13,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-16T21:02:13,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=599073b51484cc70afc3d858d8e43f45, UNASSIGN}, {pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afae9793ed4462fba8dccd3f47ddac0f, UNASSIGN}] 2024-12-16T21:02:13,329 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afae9793ed4462fba8dccd3f47ddac0f, UNASSIGN 2024-12-16T21:02:13,329 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=599073b51484cc70afc3d858d8e43f45, UNASSIGN 2024-12-16T21:02:13,329 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=599073b51484cc70afc3d858d8e43f45, regionState=CLOSING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:13,329 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=afae9793ed4462fba8dccd3f47ddac0f, regionState=CLOSING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:02:13,331 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:02:13,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure afae9793ed4462fba8dccd3f47ddac0f, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:02:13,331 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:02:13,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=94, state=RUNNABLE; CloseRegionProcedure 599073b51484cc70afc3d858d8e43f45, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:02:13,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-16T21:02:13,482 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:02:13,483 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 
afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:02:13,483 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:02:13,483 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing afae9793ed4462fba8dccd3f47ddac0f, disabling compactions & flushes 2024-12-16T21:02:13,483 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:02:13,483 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:02:13,483 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. after waiting 0 ms 2024-12-16T21:02:13,483 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:02:13,483 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:13,484 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(124): Close 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:02:13,484 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:02:13,484 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1681): Closing 599073b51484cc70afc3d858d8e43f45, disabling compactions & flushes 2024-12-16T21:02:13,484 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 2024-12-16T21:02:13,484 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 2024-12-16T21:02:13,484 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. after waiting 0 ms 2024-12-16T21:02:13,484 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 
2024-12-16T21:02:13,490 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:02:13,506 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:02:13,506 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f. 2024-12-16T21:02:13,506 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for afae9793ed4462fba8dccd3f47ddac0f: 2024-12-16T21:02:13,507 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:02:13,507 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:02:13,508 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45. 
2024-12-16T21:02:13,508 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1635): Region close journal for 599073b51484cc70afc3d858d8e43f45: 2024-12-16T21:02:13,509 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:02:13,509 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=afae9793ed4462fba8dccd3f47ddac0f, regionState=CLOSED 2024-12-16T21:02:13,509 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(170): Closed 599073b51484cc70afc3d858d8e43f45 2024-12-16T21:02:13,510 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=599073b51484cc70afc3d858d8e43f45, regionState=CLOSED 2024-12-16T21:02:13,513 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-16T21:02:13,513 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure afae9793ed4462fba8dccd3f47ddac0f, server=6b259cf15929,36861,1734382821216 in 180 msec 2024-12-16T21:02:13,513 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=94 2024-12-16T21:02:13,514 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=94, state=SUCCESS; CloseRegionProcedure 599073b51484cc70afc3d858d8e43f45, server=6b259cf15929,41713,1734382821274 in 181 msec 2024-12-16T21:02:13,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afae9793ed4462fba8dccd3f47ddac0f, UNASSIGN in 185 msec 2024-12-16T21:02:13,515 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-16T21:02:13,515 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=599073b51484cc70afc3d858d8e43f45, UNASSIGN in 185 msec 2024-12-16T21:02:13,516 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-16T21:02:13,516 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 188 msec 2024-12-16T21:02:13,517 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382933517"}]},"ts":"1734382933517"} 2024-12-16T21:02:13,519 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-16T21:02:13,521 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-16T21:02:13,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; DisableTableProcedure table=testtb-testConsecutiveExports in 202 msec 2024-12-16T21:02:13,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-16T21:02:13,625 INFO [Time-limited test 
{}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports, procId: 92 completed 2024-12-16T21:02:13,626 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-16T21:02:13,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T21:02:13,627 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T21:02:13,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-16T21:02:13,628 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=98, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T21:02:13,630 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-16T21:02:13,633 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45 2024-12-16T21:02:13,633 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:02:13,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T21:02:13,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T21:02:13,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T21:02:13,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T21:02:13,638 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-16T21:02:13,638 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-16T21:02:13,638 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports 
with data PBUF 2024-12-16T21:02:13,639 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-16T21:02:13,640 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/recovered.edits] 2024-12-16T21:02:13,640 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/recovered.edits] 2024-12-16T21:02:13,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T21:02:13,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T21:02:13,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T21:02:13,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:13,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:13,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T21:02:13,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:13,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:13,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T21:02:13,645 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/cf/bb68e884abb74089b2f21831485f57e6 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/cf/bb68e884abb74089b2f21831485f57e6 2024-12-16T21:02:13,645 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/cf/a2ed60c07042483190e5aa9e1a98fe63 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/cf/a2ed60c07042483190e5aa9e1a98fe63 2024-12-16T21:02:13,648 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45/recovered.edits/9.seqid 2024-12-16T21:02:13,649 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/599073b51484cc70afc3d858d8e43f45 2024-12-16T21:02:13,655 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f/recovered.edits/9.seqid 2024-12-16T21:02:13,655 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testConsecutiveExports/afae9793ed4462fba8dccd3f47ddac0f 2024-12-16T21:02:13,655 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-16T21:02:13,657 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=98, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T21:02:13,659 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-16T21:02:13,664 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-16T21:02:13,665 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=98, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T21:02:13,665 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testConsecutiveExports' from region states. 
2024-12-16T21:02:13,665 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382933665"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:13,665 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382933665"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:13,667 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T21:02:13,667 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 599073b51484cc70afc3d858d8e43f45, NAME => 'testtb-testConsecutiveExports,,1734382895508.599073b51484cc70afc3d858d8e43f45.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => afae9793ed4462fba8dccd3f47ddac0f, NAME => 'testtb-testConsecutiveExports,1,1734382895508.afae9793ed4462fba8dccd3f47ddac0f.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T21:02:13,667 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-16T21:02:13,667 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734382933667"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:13,671 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testConsecutiveExports state from META 2024-12-16T21:02:13,673 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=98, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T21:02:13,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; DeleteTableProcedure table=testtb-testConsecutiveExports in 47 msec 2024-12-16T21:02:13,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T21:02:13,742 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports, procId: 98 completed 2024-12-16T21:02:13,750 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" 2024-12-16T21:02:13,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-16T21:02:13,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" 2024-12-16T21:02:13,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-16T21:02:13,776 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=800 (was 795) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3894 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-994895008_1 at /127.0.0.1:44984 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38221 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:33110 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:38221 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-994895008_1 at /127.0.0.1:33084 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42957 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:57000 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 573) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) 
java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:45016 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=815 (was 815), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=451 (was 477), ProcessCount=17 (was 17), AvailableMemoryMB=3557 (was 3907) 2024-12-16T21:02:13,776 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-16T21:02:13,797 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=800, OpenFileDescriptor=815, MaxFileDescriptor=1048576, SystemLoadAverage=451, ProcessCount=17, AvailableMemoryMB=3557 2024-12-16T21:02:13,797 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-16T21:02:13,799 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:02:13,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:13,800 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:02:13,801 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:13,801 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 99 2024-12-16T21:02:13,801 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:02:13,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-16T21:02:13,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742099_1275 (size=422) 2024-12-16T21:02:13,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742099_1275 (size=422) 2024-12-16T21:02:13,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742099_1275 (size=422) 2024-12-16T21:02:13,815 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 84bea62b79efa5fd6cbb8c34a4272c04, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04.', STARTKEY => '', ENDKEY => '1'}, 
tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:13,815 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => a85dc09c32e23ad6867b018fba70db05, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:13,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742100_1276 (size=83) 2024-12-16T21:02:13,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742100_1276 (size=83) 2024-12-16T21:02:13,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742100_1276 (size=83) 2024-12-16T21:02:13,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:13,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1681): Closing 84bea62b79efa5fd6cbb8c34a4272c04, disabling compactions & flushes 2024-12-16T21:02:13,828 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:13,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:13,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 
after waiting 0 ms 2024-12-16T21:02:13,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:13,828 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:13,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1635): Region close journal for 84bea62b79efa5fd6cbb8c34a4272c04: 2024-12-16T21:02:13,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742101_1277 (size=83) 2024-12-16T21:02:13,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742101_1277 (size=83) 2024-12-16T21:02:13,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742101_1277 (size=83) 2024-12-16T21:02:13,832 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:13,832 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1681): Closing a85dc09c32e23ad6867b018fba70db05, disabling compactions & flushes 2024-12-16T21:02:13,832 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 2024-12-16T21:02:13,832 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 2024-12-16T21:02:13,832 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. after waiting 0 ms 2024-12-16T21:02:13,832 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 2024-12-16T21:02:13,833 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 
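The pid=99 records above show the master-side CreateTableProcedure for 'testtb-testExportFileSystemStateWithMergeRegion': a single column family 'cf' (one version, 64 KB blocks, no compression) and one split point, which yields the two regions with key ranges ('', '1') and ('1', ''). A minimal client-side sketch of the call that produces such a request, assuming the standard HBase 2.x Admin API (the class name and configuration source are illustrative, not taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMergeRegionTable {                      // illustrative class name
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();      // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
              .setRegionReplication(1)                         // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                           // VERSIONS => '1'
                  .setBlocksize(65536)                         // BLOCKSIZE => 64 KB
                  .build())
              .build();
          // One split key ('1') produces the two regions seen in the log:
          // (STARTKEY='', ENDKEY='1') and (STARTKEY='1', ENDKEY='').
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }

The remaining descriptor attributes in the log (BLOOMFILTER, TTL, KEEP_DELETED_CELLS and so on) are the HBase defaults, so the builder does not need to set them explicitly.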
2024-12-16T21:02:13,833 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1635): Region close journal for a85dc09c32e23ad6867b018fba70db05: 2024-12-16T21:02:13,833 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:02:13,834 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734382933834"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382933834"}]},"ts":"1734382933834"} 2024-12-16T21:02:13,834 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734382933834"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382933834"}]},"ts":"1734382933834"} 2024-12-16T21:02:13,836 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T21:02:13,837 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:02:13,837 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382933837"}]},"ts":"1734382933837"} 2024-12-16T21:02:13,838 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-16T21:02:13,842 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:02:13,843 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:02:13,843 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:02:13,843 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:02:13,843 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:02:13,843 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:02:13,843 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:02:13,843 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:02:13,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84bea62b79efa5fd6cbb8c34a4272c04, ASSIGN}, {pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a85dc09c32e23ad6867b018fba70db05, ASSIGN}] 2024-12-16T21:02:13,844 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): 
Took xlock for pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a85dc09c32e23ad6867b018fba70db05, ASSIGN 2024-12-16T21:02:13,844 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84bea62b79efa5fd6cbb8c34a4272c04, ASSIGN 2024-12-16T21:02:13,846 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a85dc09c32e23ad6867b018fba70db05, ASSIGN; state=OFFLINE, location=6b259cf15929,36861,1734382821216; forceNewPlan=false, retain=false 2024-12-16T21:02:13,846 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84bea62b79efa5fd6cbb8c34a4272c04, ASSIGN; state=OFFLINE, location=6b259cf15929,41713,1734382821274; forceNewPlan=false, retain=false 2024-12-16T21:02:13,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-16T21:02:13,996 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T21:02:13,996 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=84bea62b79efa5fd6cbb8c34a4272c04, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:13,996 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=a85dc09c32e23ad6867b018fba70db05, regionState=OPENING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:02:13,998 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=100, state=RUNNABLE; OpenRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:02:13,999 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure a85dc09c32e23ad6867b018fba70db05, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:02:14,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-16T21:02:14,150 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:14,150 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:02:14,153 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 
2024-12-16T21:02:14,153 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => a85dc09c32e23ad6867b018fba70db05, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T21:02:14,154 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. service=AccessControlService 2024-12-16T21:02:14,154 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:02:14,154 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:14,154 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:14,154 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:14,154 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:14,163 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:14,163 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => 84bea62b79efa5fd6cbb8c34a4272c04, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T21:02:14,163 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. service=AccessControlService 2024-12-16T21:02:14,164 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T21:02:14,164 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:14,164 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:14,164 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:14,164 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:14,166 INFO [StoreOpener-84bea62b79efa5fd6cbb8c34a4272c04-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:14,167 INFO [StoreOpener-84bea62b79efa5fd6cbb8c34a4272c04-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 84bea62b79efa5fd6cbb8c34a4272c04 columnFamilyName cf 2024-12-16T21:02:14,168 DEBUG [StoreOpener-84bea62b79efa5fd6cbb8c34a4272c04-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:14,168 INFO [StoreOpener-84bea62b79efa5fd6cbb8c34a4272c04-1 {}] regionserver.HStore(327): Store=84bea62b79efa5fd6cbb8c34a4272c04/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:14,169 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:14,170 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:14,173 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 
{event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:14,182 INFO [StoreOpener-a85dc09c32e23ad6867b018fba70db05-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:14,183 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:14,183 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened 84bea62b79efa5fd6cbb8c34a4272c04; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67833428, jitterRate=0.010796844959259033}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:14,184 INFO [StoreOpener-a85dc09c32e23ad6867b018fba70db05-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a85dc09c32e23ad6867b018fba70db05 columnFamilyName cf 2024-12-16T21:02:14,184 DEBUG [StoreOpener-a85dc09c32e23ad6867b018fba70db05-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:14,184 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for 84bea62b79efa5fd6cbb8c34a4272c04: 2024-12-16T21:02:14,184 INFO [StoreOpener-a85dc09c32e23ad6867b018fba70db05-1 {}] regionserver.HStore(327): Store=a85dc09c32e23ad6867b018fba70db05/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:14,185 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04., pid=102, masterSystemTime=1734382934150 2024-12-16T21:02:14,185 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:14,186 DEBUG 
[RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:14,186 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:14,186 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:14,187 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=84bea62b79efa5fd6cbb8c34a4272c04, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:14,189 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:14,191 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:14,192 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened a85dc09c32e23ad6867b018fba70db05; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65000116, jitterRate=-0.03142279386520386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:14,192 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for a85dc09c32e23ad6867b018fba70db05: 2024-12-16T21:02:14,194 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05., pid=103, masterSystemTime=1734382934150 2024-12-16T21:02:14,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=100 2024-12-16T21:02:14,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=100, state=SUCCESS; OpenRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04, server=6b259cf15929,41713,1734382821274 in 191 msec 2024-12-16T21:02:14,196 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 
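Pids 100 through 103 above trace the two regions from OPENING to OPEN on region servers 6b259cf15929,41713 and 6b259cf15929,36861. A short sketch of how a client could confirm those assignments once the procedure finishes, assuming the same HBase 2.x client API (connection setup as in the earlier sketch; the helper name is illustrative):

    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    final class AssignmentCheck {
      // Prints one "encodedRegionName -> server" line per region of the table.
      static void printAssignments(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          // Looks up hbase:meta; each entry pairs a region with its current hosting server.
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }

Against the state logged above, this would report 84bea62b79efa5fd6cbb8c34a4272c04 on 6b259cf15929,41713 and a85dc09c32e23ad6867b018fba70db05 on 6b259cf15929,36861.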
2024-12-16T21:02:14,196 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84bea62b79efa5fd6cbb8c34a4272c04, ASSIGN in 352 msec 2024-12-16T21:02:14,196 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 2024-12-16T21:02:14,197 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=a85dc09c32e23ad6867b018fba70db05, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:02:14,200 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-12-16T21:02:14,200 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure a85dc09c32e23ad6867b018fba70db05, server=6b259cf15929,36861,1734382821216 in 199 msec 2024-12-16T21:02:14,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-12-16T21:02:14,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a85dc09c32e23ad6867b018fba70db05, ASSIGN in 357 msec 2024-12-16T21:02:14,202 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:02:14,203 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382934202"}]},"ts":"1734382934202"} 2024-12-16T21:02:14,204 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-16T21:02:14,207 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:02:14,207 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-16T21:02:14,209 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-16T21:02:14,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:14,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:14,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:14,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:14,213 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:14,214 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:14,214 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:14,214 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:14,215 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 414 msec 2024-12-16T21:02:14,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-16T21:02:14,405 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 99 completed 2024-12-16T21:02:14,405 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-12-16T21:02:14,405 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:14,411 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-16T21:02:14,411 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:14,411 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-12-16T21:02:14,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-16T21:02:14,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382934414 (current time:1734382934414). 
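The record at 21:02:14,414 shows the master receiving a FLUSH-type snapshot request named 'emptySnaptb0-testExportFileSystemStateWithMergeRegion' and, shortly after, storing SnapshotProcedure pid=104 for it. On the client side this corresponds to a single Admin call; a minimal sketch assuming the HBase 2.x API (the wrapper method is illustrative, connection handling as before):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotType;

    final class TakeSnapshot {
      // Synchronous: returns only after the master reports the snapshot procedure done,
      // which is what the repeated "Checking to see if procedure is done" records reflect.
      static void takeEmptySnapshot(Admin admin) throws Exception {
        admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
            TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
            SnapshotType.FLUSH);   // matches 'type=FLUSH ttl=0' in the request logged above
      }
    }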
2024-12-16T21:02:14,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:02:14,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-16T21:02:14,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:02:14,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x73969f83 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43540484 2024-12-16T21:02:14,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@699181ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:14,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:14,423 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37076, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:14,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x73969f83 to 127.0.0.1:61742 2024-12-16T21:02:14,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:14,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x39a77990 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5ab42a04 2024-12-16T21:02:14,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f4d4c0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:14,440 DEBUG [hconnection-0x1537a812-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:14,441 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37090, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:14,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:14,444 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43422, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:14,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): 
Close zookeeper connection 0x39a77990 to 127.0.0.1:61742 2024-12-16T21:02:14,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:14,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-16T21:02:14,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:02:14,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-16T21:02:14,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-16T21:02:14,450 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:02:14,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T21:02:14,451 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:02:14,454 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:02:14,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742102_1278 (size=215) 2024-12-16T21:02:14,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742102_1278 (size=215) 2024-12-16T21:02:14,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742102_1278 (size=215) 2024-12-16T21:02:14,465 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:02:14,466 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04}, {pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure a85dc09c32e23ad6867b018fba70db05}] 2024-12-16T21:02:14,466 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:14,467 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:14,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T21:02:14,617 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:14,617 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:02:14,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-16T21:02:14,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-16T21:02:14,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:14,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 2024-12-16T21:02:14,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 84bea62b79efa5fd6cbb8c34a4272c04: 2024-12-16T21:02:14,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2538): Flush status journal for a85dc09c32e23ad6867b018fba70db05: 2024-12-16T21:02:14,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-16T21:02:14,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 
2024-12-16T21:02:14,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:14,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:14,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:02:14,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:14,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:14,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:02:14,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742104_1280 (size=86) 2024-12-16T21:02:14,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742104_1280 (size=86) 2024-12-16T21:02:14,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742103_1279 (size=86) 2024-12-16T21:02:14,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742103_1279 (size=86) 2024-12-16T21:02:14,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742104_1280 (size=86) 2024-12-16T21:02:14,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742103_1279 (size=86) 2024-12-16T21:02:14,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 
2024-12-16T21:02:14,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-16T21:02:14,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-16T21:02:14,627 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:14,627 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:14,628 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; SnapshotRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04 in 163 msec 2024-12-16T21:02:14,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 2024-12-16T21:02:14,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-16T21:02:14,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=106 2024-12-16T21:02:14,630 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:14,631 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:14,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=104 2024-12-16T21:02:14,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=104, state=SUCCESS; SnapshotRegionProcedure a85dc09c32e23ad6867b018fba70db05 in 166 msec 2024-12-16T21:02:14,632 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:02:14,633 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:02:14,633 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion 
table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:02:14,633 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:14,634 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:14,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742105_1281 (size=597) 2024-12-16T21:02:14,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742105_1281 (size=597) 2024-12-16T21:02:14,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742105_1281 (size=597) 2024-12-16T21:02:14,644 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:02:14,648 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:02:14,648 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:14,649 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:02:14,649 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-16T21:02:14,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 201 msec 2024-12-16T21:02:14,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T21:02:14,753 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): 
Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 104 completed 2024-12-16T21:02:14,760 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41713 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:02:14,762 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:02:14,765 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:14,766 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:14,766 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:14,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-16T21:02:14,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382934776 (current time:1734382934776). 2024-12-16T21:02:14,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:02:14,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-16T21:02:14,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:02:14,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3939c099 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11e69894 2024-12-16T21:02:14,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7798d2a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:14,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:14,782 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37098, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:14,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3939c099 to 127.0.0.1:61742 
2024-12-16T21:02:14,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:14,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62d00f58 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47d719ae 2024-12-16T21:02:14,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76ebe60b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:14,789 DEBUG [hconnection-0x32765944-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:14,790 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37108, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:14,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:14,792 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:14,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62d00f58 to 127.0.0.1:61742 2024-12-16T21:02:14,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:14,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-16T21:02:14,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-16T21:02:14,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-16T21:02:14,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-16T21:02:14,795 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:02:14,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-16T21:02:14,796 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:02:14,798 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:02:14,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742106_1282 (size=210) 2024-12-16T21:02:14,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742106_1282 (size=210) 2024-12-16T21:02:14,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742106_1282 (size=210) 2024-12-16T21:02:14,807 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:02:14,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04}, {pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure a85dc09c32e23ad6867b018fba70db05}] 2024-12-16T21:02:14,808 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:14,808 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; 
SnapshotRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:14,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-16T21:02:14,959 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:14,959 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:02:14,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=109 2024-12-16T21:02:14,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=108 2024-12-16T21:02:14,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 2024-12-16T21:02:14,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:14,960 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2837): Flushing 84bea62b79efa5fd6cbb8c34a4272c04 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-16T21:02:14,960 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing a85dc09c32e23ad6867b018fba70db05 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-16T21:02:14,976 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/.tmp/cf/2755eacfeee94b3fb6916dc2431fd242 is 71, key is 14973bf226c272bafae323ef73c48070/cf:q/1734382934762/Put/seqid=0 2024-12-16T21:02:14,976 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/.tmp/cf/85ec3e60ca5346fe9a2d4adf00aa8873 is 71, key is 01caf16403f0a90a9475de3a0668ea00/cf:q/1734382934760/Put/seqid=0 2024-12-16T21:02:14,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742108_1284 (size=5286) 2024-12-16T21:02:14,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742107_1283 (size=8324) 2024-12-16T21:02:14,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742108_1284 
(size=5286) 2024-12-16T21:02:14,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742108_1284 (size=5286) 2024-12-16T21:02:14,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742107_1283 (size=8324) 2024-12-16T21:02:14,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742107_1283 (size=8324) 2024-12-16T21:02:14,984 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/.tmp/cf/85ec3e60ca5346fe9a2d4adf00aa8873 2024-12-16T21:02:14,984 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/.tmp/cf/2755eacfeee94b3fb6916dc2431fd242 2024-12-16T21:02:14,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/.tmp/cf/2755eacfeee94b3fb6916dc2431fd242 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/cf/2755eacfeee94b3fb6916dc2431fd242 2024-12-16T21:02:14,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/.tmp/cf/85ec3e60ca5346fe9a2d4adf00aa8873 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/cf/85ec3e60ca5346fe9a2d4adf00aa8873 2024-12-16T21:02:14,993 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/cf/85ec3e60ca5346fe9a2d4adf00aa8873, entries=3, sequenceid=6, filesize=5.2 K 2024-12-16T21:02:14,994 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/cf/2755eacfeee94b3fb6916dc2431fd242, entries=47, sequenceid=6, filesize=8.1 K 2024-12-16T21:02:14,994 
INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 84bea62b79efa5fd6cbb8c34a4272c04 in 34ms, sequenceid=6, compaction requested=false 2024-12-16T21:02:14,994 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for a85dc09c32e23ad6867b018fba70db05 in 34ms, sequenceid=6, compaction requested=false 2024-12-16T21:02:14,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-16T21:02:14,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for a85dc09c32e23ad6867b018fba70db05: 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2538): Flush status journal for 84bea62b79efa5fd6cbb8c34a4272c04: 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/cf/2755eacfeee94b3fb6916dc2431fd242] hfiles 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/cf/2755eacfeee94b3fb6916dc2431fd242 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/cf/85ec3e60ca5346fe9a2d4adf00aa8873] hfiles 2024-12-16T21:02:14,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/cf/85ec3e60ca5346fe9a2d4adf00aa8873 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:15,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742110_1286 (size=125) 2024-12-16T21:02:15,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742109_1285 (size=125) 2024-12-16T21:02:15,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742110_1286 (size=125) 2024-12-16T21:02:15,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742109_1285 (size=125) 2024-12-16T21:02:15,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742109_1285 (size=125) 2024-12-16T21:02:15,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742110_1286 (size=125) 2024-12-16T21:02:15,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 
2024-12-16T21:02:15,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-16T21:02:15,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:15,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=108 2024-12-16T21:02:15,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-16T21:02:15,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=108 2024-12-16T21:02:15,005 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:15,005 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:15,005 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:15,005 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:15,007 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=107, state=SUCCESS; SnapshotRegionProcedure a85dc09c32e23ad6867b018fba70db05 in 199 msec 2024-12-16T21:02:15,008 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=108, resume processing ppid=107 2024-12-16T21:02:15,008 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; SnapshotRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04 in 199 msec 2024-12-16T21:02:15,008 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:02:15,008 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:02:15,009 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 
execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:02:15,009 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:15,010 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:15,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742111_1287 (size=675) 2024-12-16T21:02:15,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742111_1287 (size=675) 2024-12-16T21:02:15,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742111_1287 (size=675) 2024-12-16T21:02:15,024 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:02:15,029 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:02:15,030 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:15,031 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:02:15,031 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-16T21:02:15,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 237 msec 2024-12-16T21:02:15,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-16T21:02:15,098 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 107 completed 
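Note on the two snapshot procedures above (pid=104 and pid=107): the HBaseAdmin$TableFuture "Operation: SNAPSHOT ... completed" entries mark where the test client blocks until the master's SnapshotProcedure finishes. The exact test code is not part of this log; purely as a hedged illustration, a FLUSH-type snapshot such as snaptb0-testExportFileSystemStateWithMergeRegion is typically requested through the HBase Admin API roughly like this (connection setup assumed, names copied from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Synchronous call: returns once the master-side SnapshotProcedure
          // (the pid=107-style procedure in the log) reports completion.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
              SnapshotType.FLUSH);
        }
      }
    }

The FLUSH type corresponds to the "type=FLUSH" snapshot descriptor in the log: each online region is flushed (or, as for the empty snapshot, found to have nothing to flush) before its SnapshotRegionProcedure records the region manifest and the parent procedure consolidates and verifies the snapshot.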
2024-12-16T21:02:15,120 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T21:02:15,122 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48886, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T21:02:15,123 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-16T21:02:15,123 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T21:02:15,125 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37116, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T21:02:15,125 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41713 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-16T21:02:15,125 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T21:02:15,126 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43438, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T21:02:15,127 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42323 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-16T21:02:15,129 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:02:15,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:15,131 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:02:15,131 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:15,131 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 110 2024-12-16T21:02:15,132 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:02:15,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=110 2024-12-16T21:02:15,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742112_1288 (size=399) 2024-12-16T21:02:15,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742112_1288 (size=399) 2024-12-16T21:02:15,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742112_1288 (size=399) 2024-12-16T21:02:15,144 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 44beb29d81f4cf0927ddafdfed5b677e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:15,144 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e90e8afe2ca383d37e67af524803fc54, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:15,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742113_1289 (size=85) 2024-12-16T21:02:15,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742113_1289 (size=85) 2024-12-16T21:02:15,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742113_1289 (size=85) 2024-12-16T21:02:15,152 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:15,152 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1681): Closing e90e8afe2ca383d37e67af524803fc54, disabling compactions & flushes 2024-12-16T21:02:15,152 INFO 
[RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. 2024-12-16T21:02:15,152 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. 2024-12-16T21:02:15,152 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. after waiting 0 ms 2024-12-16T21:02:15,152 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. 2024-12-16T21:02:15,152 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. 2024-12-16T21:02:15,152 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1635): Region close journal for e90e8afe2ca383d37e67af524803fc54: 2024-12-16T21:02:15,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742114_1290 (size=85) 2024-12-16T21:02:15,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742114_1290 (size=85) 2024-12-16T21:02:15,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742114_1290 (size=85) 2024-12-16T21:02:15,159 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:15,159 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1681): Closing 44beb29d81f4cf0927ddafdfed5b677e, disabling compactions & flushes 2024-12-16T21:02:15,159 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 2024-12-16T21:02:15,159 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 2024-12-16T21:02:15,159 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 
after waiting 0 ms 2024-12-16T21:02:15,159 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 2024-12-16T21:02:15,159 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 2024-12-16T21:02:15,159 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1635): Region close journal for 44beb29d81f4cf0927ddafdfed5b677e: 2024-12-16T21:02:15,160 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:02:15,160 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734382935160"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382935160"}]},"ts":"1734382935160"} 2024-12-16T21:02:15,160 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734382935160"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382935160"}]},"ts":"1734382935160"} 2024-12-16T21:02:15,163 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
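The CreateTableProcedure above (pid=110) was initialized from a client create-table request with a single split key, which is why two regions are laid out: (STARTKEY '', ENDKEY '2') and (STARTKEY '2', ENDKEY ''). The following is only a hypothetical sketch of issuing such a request through the Admin API; the table name, column family, and split point are taken from the log, everything else is assumed:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSplitTableSketch {
      // Assumes an open Admin handle, e.g. ConnectionFactory.createConnection(conf).getAdmin().
      static void createSplitTable(Admin admin) throws IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
        // One split key => two initial regions, matching the ('', '2') and ('2', '') ranges in the log.
        byte[][] splitKeys = new byte[][] { Bytes.toBytes("2") };
        admin.createTable(desc, splitKeys);
      }
    }

The CREATE_TABLE_ASSIGN_REGIONS step and the TransitRegionStateProcedure/OpenRegionProcedure entries that follow are the assignment half of that same procedure, placing the two new regions on region servers.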
2024-12-16T21:02:15,163 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:02:15,164 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382935163"}]},"ts":"1734382935163"} 2024-12-16T21:02:15,165 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-16T21:02:15,171 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:02:15,172 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:02:15,172 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:02:15,172 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:02:15,172 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:02:15,172 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:02:15,172 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:02:15,172 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:02:15,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e90e8afe2ca383d37e67af524803fc54, ASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=44beb29d81f4cf0927ddafdfed5b677e, ASSIGN}] 2024-12-16T21:02:15,173 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=44beb29d81f4cf0927ddafdfed5b677e, ASSIGN 2024-12-16T21:02:15,174 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e90e8afe2ca383d37e67af524803fc54, ASSIGN 2024-12-16T21:02:15,174 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=44beb29d81f4cf0927ddafdfed5b677e, ASSIGN; state=OFFLINE, location=6b259cf15929,41713,1734382821274; forceNewPlan=false, retain=false 2024-12-16T21:02:15,175 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e90e8afe2ca383d37e67af524803fc54, ASSIGN; state=OFFLINE, 
location=6b259cf15929,42323,1734382821139; forceNewPlan=false, retain=false 2024-12-16T21:02:15,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T21:02:15,325 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T21:02:15,325 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=e90e8afe2ca383d37e67af524803fc54, regionState=OPENING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:15,325 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=44beb29d81f4cf0927ddafdfed5b677e, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:15,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; OpenRegionProcedure 44beb29d81f4cf0927ddafdfed5b677e, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:02:15,327 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=111, state=RUNNABLE; OpenRegionProcedure e90e8afe2ca383d37e67af524803fc54, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:02:15,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T21:02:15,478 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:15,478 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:15,482 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 2024-12-16T21:02:15,482 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. 2024-12-16T21:02:15,482 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => 44beb29d81f4cf0927ddafdfed5b677e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e.', STARTKEY => '2', ENDKEY => ''} 2024-12-16T21:02:15,482 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7285): Opening region: {ENCODED => e90e8afe2ca383d37e67af524803fc54, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54.', STARTKEY => '', ENDKEY => '2'} 2024-12-16T21:02:15,482 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 
service=AccessControlService 2024-12-16T21:02:15,482 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. service=AccessControlService 2024-12-16T21:02:15,482 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:02:15,482 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:02:15,482 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:15,482 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:15,483 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:15,483 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:15,483 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7327): checking encryption for e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:15,483 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for 44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:15,483 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7330): checking classloading for e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:15,483 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for 44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:15,484 INFO [StoreOpener-44beb29d81f4cf0927ddafdfed5b677e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:15,485 INFO [StoreOpener-44beb29d81f4cf0927ddafdfed5b677e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44beb29d81f4cf0927ddafdfed5b677e columnFamilyName cf 2024-12-16T21:02:15,485 DEBUG [StoreOpener-44beb29d81f4cf0927ddafdfed5b677e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:15,486 INFO [StoreOpener-44beb29d81f4cf0927ddafdfed5b677e-1 {}] regionserver.HStore(327): Store=44beb29d81f4cf0927ddafdfed5b677e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:15,486 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:15,487 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:15,489 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1085): writing seq id for 44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:15,490 INFO [StoreOpener-e90e8afe2ca383d37e67af524803fc54-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:15,491 INFO [StoreOpener-e90e8afe2ca383d37e67af524803fc54-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e90e8afe2ca383d37e67af524803fc54 columnFamilyName cf 2024-12-16T21:02:15,491 DEBUG [StoreOpener-e90e8afe2ca383d37e67af524803fc54-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:15,492 INFO [StoreOpener-e90e8afe2ca383d37e67af524803fc54-1 {}] 
regionserver.HStore(327): Store=e90e8afe2ca383d37e67af524803fc54/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:15,492 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:15,493 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:15,494 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:15,494 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened 44beb29d81f4cf0927ddafdfed5b677e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59610683, jitterRate=-0.11173160374164581}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:15,495 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for 44beb29d81f4cf0927ddafdfed5b677e: 2024-12-16T21:02:15,495 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1085): writing seq id for e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:15,495 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e., pid=113, masterSystemTime=1734382935478 2024-12-16T21:02:15,497 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:15,497 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 2024-12-16T21:02:15,497 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 
2024-12-16T21:02:15,497 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1102): Opened e90e8afe2ca383d37e67af524803fc54; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64361697, jitterRate=-0.040935978293418884}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:15,497 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1001): Region open journal for e90e8afe2ca383d37e67af524803fc54: 2024-12-16T21:02:15,497 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=44beb29d81f4cf0927ddafdfed5b677e, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:15,498 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54., pid=114, masterSystemTime=1734382935478 2024-12-16T21:02:15,499 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. 2024-12-16T21:02:15,499 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. 2024-12-16T21:02:15,500 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=e90e8afe2ca383d37e67af524803fc54, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:15,501 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-16T21:02:15,502 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; OpenRegionProcedure 44beb29d81f4cf0927ddafdfed5b677e, server=6b259cf15929,41713,1734382821274 in 173 msec 2024-12-16T21:02:15,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=111 2024-12-16T21:02:15,503 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=44beb29d81f4cf0927ddafdfed5b677e, ASSIGN in 330 msec 2024-12-16T21:02:15,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=111, state=SUCCESS; OpenRegionProcedure e90e8afe2ca383d37e67af524803fc54, server=6b259cf15929,42323,1734382821139 in 174 msec 2024-12-16T21:02:15,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-16T21:02:15,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e90e8afe2ca383d37e67af524803fc54, ASSIGN in 331 msec 2024-12-16T21:02:15,505 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:02:15,505 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382935505"}]},"ts":"1734382935505"} 2024-12-16T21:02:15,506 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-16T21:02:15,509 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:02:15,509 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-16T21:02:15,511 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-16T21:02:15,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:15,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:15,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:15,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:15,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 386 msec 2024-12-16T21:02:15,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:15,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:15,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:15,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:15,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:15,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:15,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:15,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:15,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T21:02:15,735 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 110 completed 2024-12-16T21:02:15,757 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$2(2219): Client=jenkins//172.17.0.2 merge regions [e90e8afe2ca383d37e67af524803fc54, 44beb29d81f4cf0927ddafdfed5b677e] 2024-12-16T21:02:15,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e90e8afe2ca383d37e67af524803fc54, 44beb29d81f4cf0927ddafdfed5b677e], force=true 2024-12-16T21:02:15,763 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e90e8afe2ca383d37e67af524803fc54, 44beb29d81f4cf0927ddafdfed5b677e], force=true 2024-12-16T21:02:15,763 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e90e8afe2ca383d37e67af524803fc54, 44beb29d81f4cf0927ddafdfed5b677e], force=true 2024-12-16T21:02:15,763 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e90e8afe2ca383d37e67af524803fc54, 44beb29d81f4cf0927ddafdfed5b677e], force=true 2024-12-16T21:02:15,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T21:02:15,775 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e90e8afe2ca383d37e67af524803fc54, UNASSIGN}, {pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=44beb29d81f4cf0927ddafdfed5b677e, UNASSIGN}] 2024-12-16T21:02:15,776 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=44beb29d81f4cf0927ddafdfed5b677e, UNASSIGN 2024-12-16T21:02:15,776 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e90e8afe2ca383d37e67af524803fc54, UNASSIGN 2024-12-16T21:02:15,777 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=e90e8afe2ca383d37e67af524803fc54, regionState=CLOSING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:15,777 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=44beb29d81f4cf0927ddafdfed5b677e, regionState=CLOSING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:15,778 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-16T21:02:15,778 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE; CloseRegionProcedure e90e8afe2ca383d37e67af524803fc54, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:02:15,779 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-16T21:02:15,781 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=117, state=RUNNABLE; CloseRegionProcedure 44beb29d81f4cf0927ddafdfed5b677e, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:02:15,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T21:02:15,932 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:15,932 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:15,932 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close 44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:15,932 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(124): Close e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:15,932 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-16T21:02:15,932 DEBUG 
[RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-16T21:02:15,933 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing 44beb29d81f4cf0927ddafdfed5b677e, disabling compactions & flushes 2024-12-16T21:02:15,933 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1681): Closing e90e8afe2ca383d37e67af524803fc54, disabling compactions & flushes 2024-12-16T21:02:15,933 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 2024-12-16T21:02:15,933 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. 2024-12-16T21:02:15,933 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 2024-12-16T21:02:15,933 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. 2024-12-16T21:02:15,933 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. after waiting 0 ms 2024-12-16T21:02:15,933 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. after waiting 0 ms 2024-12-16T21:02:15,933 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 2024-12-16T21:02:15,933 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. 
2024-12-16T21:02:15,933 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(2837): Flushing e90e8afe2ca383d37e67af524803fc54 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-16T21:02:15,933 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing 44beb29d81f4cf0927ddafdfed5b677e 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-16T21:02:15,949 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/.tmp/cf/59b203f75337441e8fde938f2e7c1162 is 28, key is 2/cf:/1734382935745/Put/seqid=0 2024-12-16T21:02:15,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742115_1291 (size=4945) 2024-12-16T21:02:15,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742115_1291 (size=4945) 2024-12-16T21:02:15,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742115_1291 (size=4945) 2024-12-16T21:02:15,955 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/.tmp/cf/59b203f75337441e8fde938f2e7c1162 2024-12-16T21:02:15,955 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/.tmp/cf/218c176aca13469ea5eac9317a4e6cae is 28, key is 1/cf:/1734382935739/Put/seqid=0 2024-12-16T21:02:15,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742116_1292 (size=4945) 2024-12-16T21:02:15,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742116_1292 (size=4945) 2024-12-16T21:02:15,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742116_1292 (size=4945) 2024-12-16T21:02:15,966 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/.tmp/cf/218c176aca13469ea5eac9317a4e6cae 2024-12-16T21:02:15,969 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/.tmp/cf/59b203f75337441e8fde938f2e7c1162 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/cf/59b203f75337441e8fde938f2e7c1162 2024-12-16T21:02:15,972 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/.tmp/cf/218c176aca13469ea5eac9317a4e6cae as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/cf/218c176aca13469ea5eac9317a4e6cae 2024-12-16T21:02:15,974 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/cf/59b203f75337441e8fde938f2e7c1162, entries=1, sequenceid=5, filesize=4.8 K 2024-12-16T21:02:15,975 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 44beb29d81f4cf0927ddafdfed5b677e in 42ms, sequenceid=5, compaction requested=false 2024-12-16T21:02:15,975 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-16T21:02:15,978 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/cf/218c176aca13469ea5eac9317a4e6cae, entries=1, sequenceid=5, filesize=4.8 K 2024-12-16T21:02:15,978 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for e90e8afe2ca383d37e67af524803fc54 in 45ms, sequenceid=5, compaction requested=false 2024-12-16T21:02:15,981 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T21:02:15,982 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:02:15,982 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed 
testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e. 2024-12-16T21:02:15,982 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for 44beb29d81f4cf0927ddafdfed5b677e: 2024-12-16T21:02:15,982 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T21:02:15,983 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:02:15,983 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54. 2024-12-16T21:02:15,983 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1635): Region close journal for e90e8afe2ca383d37e67af524803fc54: 2024-12-16T21:02:15,983 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed 44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:15,984 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=44beb29d81f4cf0927ddafdfed5b677e, regionState=CLOSED 2024-12-16T21:02:15,984 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(170): Closed e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:15,984 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=e90e8afe2ca383d37e67af524803fc54, regionState=CLOSED 2024-12-16T21:02:15,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=117 2024-12-16T21:02:15,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=117, state=SUCCESS; CloseRegionProcedure 44beb29d81f4cf0927ddafdfed5b677e, server=6b259cf15929,41713,1734382821274 in 206 msec 2024-12-16T21:02:15,987 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=116 2024-12-16T21:02:15,988 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=116, state=SUCCESS; CloseRegionProcedure e90e8afe2ca383d37e67af524803fc54, server=6b259cf15929,42323,1734382821139 in 208 msec 2024-12-16T21:02:15,988 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=44beb29d81f4cf0927ddafdfed5b677e, UNASSIGN in 212 msec 2024-12-16T21:02:15,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115 2024-12-16T21:02:15,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e90e8afe2ca383d37e67af524803fc54, UNASSIGN in 212 msec 2024-12-16T21:02:16,003 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742117_1293 (size=84) 2024-12-16T21:02:16,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742117_1293 (size=84) 2024-12-16T21:02:16,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742117_1293 (size=84) 2024-12-16T21:02:16,006 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:16,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742118_1294 (size=20) 2024-12-16T21:02:16,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742118_1294 (size=20) 2024-12-16T21:02:16,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742118_1294 (size=20) 2024-12-16T21:02:16,017 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:16,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742119_1295 (size=21) 2024-12-16T21:02:16,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742119_1295 (size=21) 2024-12-16T21:02:16,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742119_1295 (size=21) 2024-12-16T21:02:16,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742120_1296 (size=84) 2024-12-16T21:02:16,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742120_1296 (size=84) 2024-12-16T21:02:16,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742120_1296 (size=84) 2024-12-16T21:02:16,055 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:16,064 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-16T21:02:16,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T21:02:16,066 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935128.e90e8afe2ca383d37e67af524803fc54.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:16,066 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): 
Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734382935128.44beb29d81f4cf0927ddafdfed5b677e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:16,066 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:16,102 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=af64a4a5600e9932d60c8c31a3595921, ASSIGN}] 2024-12-16T21:02:16,103 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=af64a4a5600e9932d60c8c31a3595921, ASSIGN 2024-12-16T21:02:16,103 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=af64a4a5600e9932d60c8c31a3595921, ASSIGN; state=MERGED, location=6b259cf15929,42323,1734382821139; forceNewPlan=false, retain=false 2024-12-16T21:02:16,253 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-16T21:02:16,254 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=af64a4a5600e9932d60c8c31a3595921, regionState=OPENING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:16,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure af64a4a5600e9932d60c8c31a3595921, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:02:16,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T21:02:16,407 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:16,410 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. 
2024-12-16T21:02:16,410 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => af64a4a5600e9932d60c8c31a3595921, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921.', STARTKEY => '', ENDKEY => ''} 2024-12-16T21:02:16,410 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. service=AccessControlService 2024-12-16T21:02:16,410 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:02:16,410 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:16,411 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:16,411 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:16,411 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:16,412 INFO [StoreOpener-af64a4a5600e9932d60c8c31a3595921-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:16,413 INFO [StoreOpener-af64a4a5600e9932d60c8c31a3595921-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region af64a4a5600e9932d60c8c31a3595921 columnFamilyName cf 2024-12-16T21:02:16,413 DEBUG [StoreOpener-af64a4a5600e9932d60c8c31a3595921-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:16,432 DEBUG [StoreOpener-af64a4a5600e9932d60c8c31a3595921-1 {}] regionserver.StoreEngine(277): loaded 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/cf/218c176aca13469ea5eac9317a4e6cae.e90e8afe2ca383d37e67af524803fc54->hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/cf/218c176aca13469ea5eac9317a4e6cae-top 2024-12-16T21:02:16,438 DEBUG [StoreOpener-af64a4a5600e9932d60c8c31a3595921-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/cf/59b203f75337441e8fde938f2e7c1162.44beb29d81f4cf0927ddafdfed5b677e->hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/cf/59b203f75337441e8fde938f2e7c1162-top 2024-12-16T21:02:16,438 INFO [StoreOpener-af64a4a5600e9932d60c8c31a3595921-1 {}] regionserver.HStore(327): Store=af64a4a5600e9932d60c8c31a3595921/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:16,439 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:16,440 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:16,442 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:16,443 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened af64a4a5600e9932d60c8c31a3595921; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65846260, jitterRate=-0.018814265727996826}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:16,444 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for af64a4a5600e9932d60c8c31a3595921: 2024-12-16T21:02:16,445 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921., pid=121, masterSystemTime=1734382936407 2024-12-16T21:02:16,445 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921.,because compaction is disabled. 
2024-12-16T21:02:16,447 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. 2024-12-16T21:02:16,447 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. 2024-12-16T21:02:16,448 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=af64a4a5600e9932d60c8c31a3595921, regionState=OPEN, openSeqNum=9, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:16,451 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-16T21:02:16,451 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure af64a4a5600e9932d60c8c31a3595921, server=6b259cf15929,42323,1734382821139 in 194 msec 2024-12-16T21:02:16,452 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=115 2024-12-16T21:02:16,452 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=af64a4a5600e9932d60c8c31a3595921, ASSIGN in 349 msec 2024-12-16T21:02:16,454 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e90e8afe2ca383d37e67af524803fc54, 44beb29d81f4cf0927ddafdfed5b677e], force=true in 693 msec 2024-12-16T21:02:16,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T21:02:16,868 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 115 completed 2024-12-16T21:02:16,869 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-16T21:02:16,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382936869 (current time:1734382936869). 
2024-12-16T21:02:16,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:02:16,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-16T21:02:16,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:02:16,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08333911 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@198c0047 2024-12-16T21:02:16,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f0d9129, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:16,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:16,877 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08333911 to 127.0.0.1:61742 2024-12-16T21:02:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1adc252f to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63ab800b 2024-12-16T21:02:16,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60acd18e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:16,885 DEBUG [hconnection-0x58553a44-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:16,886 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37132, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:16,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:16,889 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43440, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:16,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x1adc252f to 127.0.0.1:61742 2024-12-16T21:02:16,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:16,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-16T21:02:16,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:02:16,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-16T21:02:16,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-16T21:02:16,893 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:02:16,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T21:02:16,894 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:02:16,900 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:02:16,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742121_1297 (size=216) 2024-12-16T21:02:16,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742121_1297 (size=216) 2024-12-16T21:02:16,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742121_1297 (size=216) 2024-12-16T21:02:16,908 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:02:16,908 INFO [PEWorker-3 
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure af64a4a5600e9932d60c8c31a3595921}] 2024-12-16T21:02:16,909 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T21:02:17,061 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:17,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42323 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-16T21:02:17,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. 2024-12-16T21:02:17,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for af64a4a5600e9932d60c8c31a3595921: 2024-12-16T21:02:17,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-16T21:02:17,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:17,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:17,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/cf/218c176aca13469ea5eac9317a4e6cae.e90e8afe2ca383d37e67af524803fc54->hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/cf/218c176aca13469ea5eac9317a4e6cae-top, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/cf/59b203f75337441e8fde938f2e7c1162.44beb29d81f4cf0927ddafdfed5b677e->hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/cf/59b203f75337441e8fde938f2e7c1162-top] hfiles 2024-12-16T21:02:17,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/cf/218c176aca13469ea5eac9317a4e6cae.e90e8afe2ca383d37e67af524803fc54 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:17,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/cf/59b203f75337441e8fde938f2e7c1162.44beb29d81f4cf0927ddafdfed5b677e for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:17,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742122_1298 (size=269) 2024-12-16T21:02:17,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742122_1298 (size=269) 2024-12-16T21:02:17,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742122_1298 (size=269) 2024-12-16T21:02:17,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. 
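
The entries above trace the master-side SnapshotProcedure (pid=122) delegating to SnapshotRegionProcedure (pid=123), which stores the region info and the two referenced hfiles for snaptb0-testExportFileSystemStateWithMergeRegion-1. For context, a minimal client-side sketch of requesting the same kind of FLUSH snapshot through the public Admin API follows; the ZooKeeper address, table name, and snapshot name are copied from this log and are assumptions outside this test environment.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: quorum/port taken from the ReadOnlyZKClient entries in this log;
    // adjust for a real cluster.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "61742");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // For an enabled table this requests a flush snapshot, matching the
      // "type=FLUSH ttl=0" description recorded by SnapshotProcedure above.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
    }
  }
}
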
2024-12-16T21:02:17,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-16T21:02:17,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-16T21:02:17,087 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:17,088 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:17,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-16T21:02:17,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; SnapshotRegionProcedure af64a4a5600e9932d60c8c31a3595921 in 180 msec 2024-12-16T21:02:17,093 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:02:17,094 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:02:17,094 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:02:17,094 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:17,095 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:17,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742123_1299 (size=670) 2024-12-16T21:02:17,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742123_1299 (size=670) 2024-12-16T21:02:17,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742123_1299 (size=670) 2024-12-16T21:02:17,111 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:02:17,117 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:02:17,118 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:17,120 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:02:17,120 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-16T21:02:17,121 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 229 msec 2024-12-16T21:02:17,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T21:02:17,196 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 122 completed 2024-12-16T21:02:17,196 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382937196 2024-12-16T21:02:17,196 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37263, tgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382937196, rawTgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382937196, srcFsUri=hdfs://localhost:37263, srcDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:17,236 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37263, inputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:17,236 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382937196, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382937196/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:17,238 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T21:02:17,245 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382937196/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:17,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742124_1300 (size=216) 2024-12-16T21:02:17,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742124_1300 (size=216) 2024-12-16T21:02:17,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742124_1300 (size=216) 2024-12-16T21:02:17,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742125_1301 (size=670) 2024-12-16T21:02:17,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742125_1301 (size=670) 2024-12-16T21:02:17,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742125_1301 (size=670) 2024-12-16T21:02:17,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:17,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:17,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:17,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:17,617 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0005_000001 (auth:SIMPLE) from 127.0.0.1:57244 2024-12-16T21:02:17,628 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_0/usercache/jenkins/appcache/application_1734382828217_0005/container_1734382828217_0005_01_000001/launch_container.sh] 2024-12-16T21:02:17,628 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_0/usercache/jenkins/appcache/application_1734382828217_0005/container_1734382828217_0005_01_000001/container_tokens] 2024-12-16T21:02:17,628 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_0/usercache/jenkins/appcache/application_1734382828217_0005/container_1734382828217_0005_01_000001/sysfs] 2024-12-16T21:02:18,287 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-7896513403054101681.jar 2024-12-16T21:02:18,288 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:18,288 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:18,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-7788002198191186235.jar 2024-12-16T21:02:18,368 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:18,368 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:18,368 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:18,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:18,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:18,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:18,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T21:02:18,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T21:02:18,370 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T21:02:18,370 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T21:02:18,370 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T21:02:18,370 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T21:02:18,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T21:02:18,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T21:02:18,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T21:02:18,371 DEBUG [Time-limited 
test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T21:02:18,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T21:02:18,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T21:02:18,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:02:18,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:02:18,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:02:18,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:02:18,374 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:02:18,374 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:02:18,374 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:02:18,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742126_1302 (size=451756) 2024-12-16T21:02:18,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742126_1302 (size=451756) 2024-12-16T21:02:18,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742126_1302 
(size=451756) 2024-12-16T21:02:18,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742127_1303 (size=127628) 2024-12-16T21:02:18,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742127_1303 (size=127628) 2024-12-16T21:02:18,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742127_1303 (size=127628) 2024-12-16T21:02:18,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742128_1304 (size=2172137) 2024-12-16T21:02:18,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742128_1304 (size=2172137) 2024-12-16T21:02:18,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742128_1304 (size=2172137) 2024-12-16T21:02:18,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742129_1305 (size=213228) 2024-12-16T21:02:18,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742129_1305 (size=213228) 2024-12-16T21:02:18,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742129_1305 (size=213228) 2024-12-16T21:02:18,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742130_1306 (size=1877034) 2024-12-16T21:02:18,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742130_1306 (size=1877034) 2024-12-16T21:02:18,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742130_1306 (size=1877034) 2024-12-16T21:02:18,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742131_1307 (size=533455) 2024-12-16T21:02:18,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742131_1307 (size=533455) 2024-12-16T21:02:18,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742131_1307 (size=533455) 2024-12-16T21:02:18,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742132_1308 (size=7280644) 2024-12-16T21:02:18,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742132_1308 (size=7280644) 2024-12-16T21:02:18,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742132_1308 (size=7280644) 2024-12-16T21:02:18,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742133_1309 (size=4188619) 2024-12-16T21:02:18,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to 
blk_1073742133_1309 (size=4188619) 2024-12-16T21:02:18,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742133_1309 (size=4188619) 2024-12-16T21:02:18,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742134_1310 (size=20406) 2024-12-16T21:02:18,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742134_1310 (size=20406) 2024-12-16T21:02:18,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742134_1310 (size=20406) 2024-12-16T21:02:18,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742135_1311 (size=75495) 2024-12-16T21:02:18,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742135_1311 (size=75495) 2024-12-16T21:02:18,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742135_1311 (size=75495) 2024-12-16T21:02:19,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742136_1312 (size=45609) 2024-12-16T21:02:19,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742136_1312 (size=45609) 2024-12-16T21:02:19,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742136_1312 (size=45609) 2024-12-16T21:02:19,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742137_1313 (size=110084) 2024-12-16T21:02:19,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742137_1313 (size=110084) 2024-12-16T21:02:19,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742137_1313 (size=110084) 2024-12-16T21:02:19,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742138_1314 (size=1323991) 2024-12-16T21:02:19,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742138_1314 (size=1323991) 2024-12-16T21:02:19,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742138_1314 (size=1323991) 2024-12-16T21:02:19,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742139_1315 (size=23076) 2024-12-16T21:02:19,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742139_1315 (size=23076) 2024-12-16T21:02:19,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742139_1315 (size=23076) 2024-12-16T21:02:19,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to 
blk_1073742140_1316 (size=126803) 2024-12-16T21:02:19,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742140_1316 (size=126803) 2024-12-16T21:02:19,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742140_1316 (size=126803) 2024-12-16T21:02:19,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742141_1317 (size=322274) 2024-12-16T21:02:19,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742141_1317 (size=322274) 2024-12-16T21:02:19,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742141_1317 (size=322274) 2024-12-16T21:02:19,057 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:02:19,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742142_1318 (size=1832290) 2024-12-16T21:02:19,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742142_1318 (size=1832290) 2024-12-16T21:02:19,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742142_1318 (size=1832290) 2024-12-16T21:02:19,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742143_1319 (size=6350920) 2024-12-16T21:02:19,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742143_1319 (size=6350920) 2024-12-16T21:02:19,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742143_1319 (size=6350920) 2024-12-16T21:02:19,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742144_1320 (size=30081) 2024-12-16T21:02:19,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742144_1320 (size=30081) 2024-12-16T21:02:19,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742144_1320 (size=30081) 2024-12-16T21:02:19,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742145_1321 (size=53616) 2024-12-16T21:02:19,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742145_1321 (size=53616) 2024-12-16T21:02:19,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742145_1321 (size=53616) 2024-12-16T21:02:19,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742146_1322 (size=29229) 2024-12-16T21:02:19,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is 
added to blk_1073742146_1322 (size=29229) 2024-12-16T21:02:19,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742146_1322 (size=29229) 2024-12-16T21:02:19,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742147_1323 (size=169089) 2024-12-16T21:02:19,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742147_1323 (size=169089) 2024-12-16T21:02:19,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742147_1323 (size=169089) 2024-12-16T21:02:19,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742148_1324 (size=5175431) 2024-12-16T21:02:19,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742148_1324 (size=5175431) 2024-12-16T21:02:19,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742148_1324 (size=5175431) 2024-12-16T21:02:19,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742149_1325 (size=136454) 2024-12-16T21:02:19,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742149_1325 (size=136454) 2024-12-16T21:02:19,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742149_1325 (size=136454) 2024-12-16T21:02:19,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742150_1326 (size=907465) 2024-12-16T21:02:19,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742150_1326 (size=907465) 2024-12-16T21:02:19,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742150_1326 (size=907465) 2024-12-16T21:02:19,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742151_1327 (size=3317408) 2024-12-16T21:02:19,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742151_1327 (size=3317408) 2024-12-16T21:02:19,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742151_1327 (size=3317408) 2024-12-16T21:02:19,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742152_1328 (size=503880) 2024-12-16T21:02:19,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742152_1328 (size=503880) 2024-12-16T21:02:19,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742152_1328 (size=503880) 2024-12-16T21:02:19,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33147 is added to blk_1073742153_1329 (size=4695811) 2024-12-16T21:02:19,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742153_1329 (size=4695811) 2024-12-16T21:02:19,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742153_1329 (size=4695811) 2024-12-16T21:02:19,260 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-16T21:02:19,262 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-16T21:02:19,264 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=9.7 K 2024-12-16T21:02:19,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742154_1330 (size=378) 2024-12-16T21:02:19,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742154_1330 (size=378) 2024-12-16T21:02:19,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742154_1330 (size=378) 2024-12-16T21:02:19,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742155_1331 (size=15) 2024-12-16T21:02:19,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742155_1331 (size=15) 2024-12-16T21:02:19,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742155_1331 (size=15) 2024-12-16T21:02:19,282 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T21:02:19,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742156_1332 (size=304940) 2024-12-16T21:02:19,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742156_1332 (size=304940) 2024-12-16T21:02:19,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742156_1332 (size=304940) 2024-12-16T21:02:19,318 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:02:19,318 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T21:02:19,481 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0006_000001 (auth:SIMPLE) from 127.0.0.1:47464 2024-12-16T21:02:20,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:20,872 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-16T21:02:20,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:20,873 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-16T21:02:20,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-16T21:02:24,310 DEBUG [master/6b259cf15929:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region a85dc09c32e23ad6867b018fba70db05 changed from -1.0 to 0.0, refreshing cache 2024-12-16T21:02:24,310 DEBUG [master/6b259cf15929:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 84bea62b79efa5fd6cbb8c34a4272c04 changed from -1.0 to 0.0, refreshing cache 2024-12-16T21:02:25,564 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0006_000001 (auth:SIMPLE) from 127.0.0.1:58422 2024-12-16T21:02:25,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742157_1333 (size=350614) 2024-12-16T21:02:25,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742157_1333 (size=350614) 2024-12-16T21:02:25,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742157_1333 (size=350614) 2024-12-16T21:02:26,375 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:02:27,819 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0006_000001 (auth:SIMPLE) from 127.0.0.1:46276 2024-12-16T21:02:31,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742158_1334 (size=4945) 2024-12-16T21:02:31,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742158_1334 (size=4945) 2024-12-16T21:02:31,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742158_1334 (size=4945) 2024-12-16T21:02:31,448 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742159_1335 (size=4945) 2024-12-16T21:02:31,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742159_1335 (size=4945) 2024-12-16T21:02:31,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742159_1335 (size=4945) 2024-12-16T21:02:31,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742160_1336 (size=17474) 2024-12-16T21:02:31,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742160_1336 (size=17474) 2024-12-16T21:02:31,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742160_1336 (size=17474) 2024-12-16T21:02:31,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742161_1337 (size=482) 2024-12-16T21:02:31,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742161_1337 (size=482) 2024-12-16T21:02:31,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742161_1337 (size=482) 2024-12-16T21:02:31,557 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_2/usercache/jenkins/appcache/application_1734382828217_0006/container_1734382828217_0006_01_000002/launch_container.sh] 2024-12-16T21:02:31,557 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_2/usercache/jenkins/appcache/application_1734382828217_0006/container_1734382828217_0006_01_000002/container_tokens] 2024-12-16T21:02:31,557 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_2/usercache/jenkins/appcache/application_1734382828217_0006/container_1734382828217_0006_01_000002/sysfs] 2024-12-16T21:02:31,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742162_1338 (size=17474) 2024-12-16T21:02:31,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742162_1338 (size=17474) 2024-12-16T21:02:31,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to 
blk_1073742162_1338 (size=17474) 2024-12-16T21:02:31,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742163_1339 (size=350614) 2024-12-16T21:02:31,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742163_1339 (size=350614) 2024-12-16T21:02:31,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742163_1339 (size=350614) 2024-12-16T21:02:31,592 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0006_000001 (auth:SIMPLE) from 127.0.0.1:40812 2024-12-16T21:02:32,650 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T21:02:32,651 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T21:02:32,657 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,657 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T21:02:32,658 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T21:02:32,658 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,658 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-16T21:02:32,658 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-16T21:02:32,658 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382937196/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382937196/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,659 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382937196/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-16T21:02:32,659 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382937196/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-16T21:02:32,664 INFO 
[Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,664 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-16T21:02:32,667 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382952667"}]},"ts":"1734382952667"} 2024-12-16T21:02:32,668 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-16T21:02:32,670 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-16T21:02:32,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-16T21:02:32,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=af64a4a5600e9932d60c8c31a3595921, UNASSIGN}] 2024-12-16T21:02:32,672 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=af64a4a5600e9932d60c8c31a3595921, UNASSIGN 2024-12-16T21:02:32,673 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=af64a4a5600e9932d60c8c31a3595921, regionState=CLOSING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:32,674 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:02:32,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure af64a4a5600e9932d60c8c31a3595921, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:02:32,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-16T21:02:32,825 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:32,826 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:32,826 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 
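The "Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1" entry above marks the end of the snapshot export this test drives. For reference only, a hedged sketch of how such an export is typically launched through the ExportSnapshot tool follows; the destination URI below is a placeholder, not a value taken from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to the command-line form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 \
    //     -copy-to hdfs://namenode:8020/export-target
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://namenode:8020/export-target"  // placeholder destination
    });
    System.exit(rc);
  }
}
```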
2024-12-16T21:02:32,826 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing af64a4a5600e9932d60c8c31a3595921, disabling compactions & flushes 2024-12-16T21:02:32,826 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. 2024-12-16T21:02:32,826 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. 2024-12-16T21:02:32,826 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. after waiting 0 ms 2024-12-16T21:02:32,826 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. 2024-12-16T21:02:32,830 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-16T21:02:32,831 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:02:32,831 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921. 
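Referring back to the TestExportSnapshot "List files in DFS" entries above, which enumerate .snapshotinfo and data.manifest at both the source and the export destination, the sketch below shows the same kind of verification with the Hadoop FileSystem API. It is illustrative only: the filesystem URI is a placeholder, and the snapshot directory layout is assumed from the paths logged above.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListExportedSnapshotFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder root; the run above used hdfs://localhost:37263/... paths.
    URI root = URI.create("hdfs://namenode:8020/export-target");
    FileSystem fs = FileSystem.get(root, conf);

    // Recursively list everything under .hbase-snapshot/<snapshot name>,
    // which should contain at least .snapshotinfo and data.manifest.
    Path snapshotDir = new Path(root.toString(),
        ".hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1");
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(snapshotDir, true);
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
  }
}
```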
2024-12-16T21:02:32,831 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for af64a4a5600e9932d60c8c31a3595921: 2024-12-16T21:02:32,832 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:32,832 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=af64a4a5600e9932d60c8c31a3595921, regionState=CLOSED 2024-12-16T21:02:32,835 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-16T21:02:32,835 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure af64a4a5600e9932d60c8c31a3595921, server=6b259cf15929,42323,1734382821139 in 160 msec 2024-12-16T21:02:32,836 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-16T21:02:32,836 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=af64a4a5600e9932d60c8c31a3595921, UNASSIGN in 163 msec 2024-12-16T21:02:32,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-16T21:02:32,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 167 msec 2024-12-16T21:02:32,839 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382952838"}]},"ts":"1734382952838"} 2024-12-16T21:02:32,840 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-16T21:02:32,842 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-16T21:02:32,843 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 178 msec 2024-12-16T21:02:32,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-16T21:02:32,969 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 124 completed 2024-12-16T21:02:32,969 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,971 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,971 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,973 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,974 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:32,974 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:32,974 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:32,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,976 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-16T21:02:32,976 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-16T21:02:32,976 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-16T21:02:32,976 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 
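The entries above show the DisableTableProcedure (pid=124) completing and the DeleteTableProcedure (pid=128) starting to archive the regions of testtb-testExportFileSystemStateWithMergeRegion-1. A minimal sketch of the client-side calls that trigger this sequence, assuming a standard HBase 2.x client connection:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // disableTable blocks until the master's DisableTableProcedure finishes,
      // mirroring the pid=124 entries above.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      // deleteTable archives the region directories (the HFileArchiver entries)
      // and removes the table's rows from hbase:meta.
      admin.deleteTable(table);
    }
  }
}
```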
2024-12-16T21:02:32,977 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/recovered.edits] 2024-12-16T21:02:32,977 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/recovered.edits] 2024-12-16T21:02:32,977 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/recovered.edits] 2024-12-16T21:02:32,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:32,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:32,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:32,979 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:32,979 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-16T21:02:32,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,979 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:32,979 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:32,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:32,980 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:32,982 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/cf/59b203f75337441e8fde938f2e7c1162 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/cf/59b203f75337441e8fde938f2e7c1162 2024-12-16T21:02:32,982 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/cf/218c176aca13469ea5eac9317a4e6cae to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/cf/218c176aca13469ea5eac9317a4e6cae 2024-12-16T21:02:32,982 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/cf/218c176aca13469ea5eac9317a4e6cae.e90e8afe2ca383d37e67af524803fc54 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/cf/218c176aca13469ea5eac9317a4e6cae.e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:32,982 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/cf/59b203f75337441e8fde938f2e7c1162.44beb29d81f4cf0927ddafdfed5b677e to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/cf/59b203f75337441e8fde938f2e7c1162.44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:32,985 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/recovered.edits/8.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e/recovered.edits/8.seqid 2024-12-16T21:02:32,985 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/recovered.edits/8.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54/recovered.edits/8.seqid 2024-12-16T21:02:32,985 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/recovered.edits/12.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921/recovered.edits/12.seqid 2024-12-16T21:02:32,985 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/44beb29d81f4cf0927ddafdfed5b677e 2024-12-16T21:02:32,986 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e90e8afe2ca383d37e67af524803fc54 2024-12-16T21:02:32,986 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/af64a4a5600e9932d60c8c31a3595921 2024-12-16T21:02:32,986 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-16T21:02:32,988 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,990 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-16T21:02:32,991 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): 
Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-16T21:02:32,992 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,992 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-16T21:02:32,992 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382952992"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:32,994 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-16T21:02:32,994 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => af64a4a5600e9932d60c8c31a3595921, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921.', STARTKEY => '', ENDKEY => ''}] 2024-12-16T21:02:32,994 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-16T21:02:32,994 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734382952994"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:32,996 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-16T21:02:32,998 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:32,999 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 29 msec 2024-12-16T21:02:33,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-16T21:02:33,080 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 128 completed 2024-12-16T21:02:33,081 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,081 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-16T21:02:33,083 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382953083"}]},"ts":"1734382953083"} 2024-12-16T21:02:33,084 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-16T21:02:33,090 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-16T21:02:33,090 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-16T21:02:33,092 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84bea62b79efa5fd6cbb8c34a4272c04, UNASSIGN}, {pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a85dc09c32e23ad6867b018fba70db05, UNASSIGN}] 2024-12-16T21:02:33,092 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a85dc09c32e23ad6867b018fba70db05, UNASSIGN 2024-12-16T21:02:33,093 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84bea62b79efa5fd6cbb8c34a4272c04, UNASSIGN 2024-12-16T21:02:33,093 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=a85dc09c32e23ad6867b018fba70db05, regionState=CLOSING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:02:33,093 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=84bea62b79efa5fd6cbb8c34a4272c04, regionState=CLOSING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:33,094 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:02:33,095 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; CloseRegionProcedure a85dc09c32e23ad6867b018fba70db05, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:02:33,095 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:02:33,095 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=131, state=RUNNABLE; CloseRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:02:33,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-16T21:02:33,246 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:33,246 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
6b259cf15929,36861,1734382821216 2024-12-16T21:02:33,247 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:33,247 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(124): Close 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:33,247 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:02:33,247 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:02:33,247 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing a85dc09c32e23ad6867b018fba70db05, disabling compactions & flushes 2024-12-16T21:02:33,247 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1681): Closing 84bea62b79efa5fd6cbb8c34a4272c04, disabling compactions & flushes 2024-12-16T21:02:33,247 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 2024-12-16T21:02:33,247 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 2024-12-16T21:02:33,247 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:33,247 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. after waiting 0 ms 2024-12-16T21:02:33,247 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 2024-12-16T21:02:33,247 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 2024-12-16T21:02:33,247 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. after waiting 0 ms 2024-12-16T21:02:33,247 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 
2024-12-16T21:02:33,251 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:02:33,251 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:02:33,251 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:02:33,252 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:02:33,252 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05. 2024-12-16T21:02:33,252 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for a85dc09c32e23ad6867b018fba70db05: 2024-12-16T21:02:33,252 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04. 
2024-12-16T21:02:33,252 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1635): Region close journal for 84bea62b79efa5fd6cbb8c34a4272c04: 2024-12-16T21:02:33,253 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:33,254 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=a85dc09c32e23ad6867b018fba70db05, regionState=CLOSED 2024-12-16T21:02:33,254 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(170): Closed 84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:33,254 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=84bea62b79efa5fd6cbb8c34a4272c04, regionState=CLOSED 2024-12-16T21:02:33,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-16T21:02:33,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; CloseRegionProcedure a85dc09c32e23ad6867b018fba70db05, server=6b259cf15929,36861,1734382821216 in 161 msec 2024-12-16T21:02:33,257 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=131 2024-12-16T21:02:33,258 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a85dc09c32e23ad6867b018fba70db05, UNASSIGN in 164 msec 2024-12-16T21:02:33,258 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=131, state=SUCCESS; CloseRegionProcedure 84bea62b79efa5fd6cbb8c34a4272c04, server=6b259cf15929,41713,1734382821274 in 161 msec 2024-12-16T21:02:33,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-16T21:02:33,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84bea62b79efa5fd6cbb8c34a4272c04, UNASSIGN in 165 msec 2024-12-16T21:02:33,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-12-16T21:02:33,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 169 msec 2024-12-16T21:02:33,261 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382953261"}]},"ts":"1734382953261"} 2024-12-16T21:02:33,262 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-16T21:02:33,264 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-16T21:02:33,266 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 184 msec 2024-12-16T21:02:33,385 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-16T21:02:33,385 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 129 completed 2024-12-16T21:02:33,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,387 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,388 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=135, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,389 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,391 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:33,391 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:33,392 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/recovered.edits] 2024-12-16T21:02:33,392 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/recovered.edits] 2024-12-16T21:02:33,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 
2024-12-16T21:02:33,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,394 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-16T21:02:33,394 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-16T21:02:33,394 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-16T21:02:33,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-16T21:02:33,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:33,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:33,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:33,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, 
state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:33,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-16T21:02:33,402 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/cf/85ec3e60ca5346fe9a2d4adf00aa8873 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/cf/85ec3e60ca5346fe9a2d4adf00aa8873 2024-12-16T21:02:33,402 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/cf/2755eacfeee94b3fb6916dc2431fd242 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/cf/2755eacfeee94b3fb6916dc2431fd242 2024-12-16T21:02:33,405 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05/recovered.edits/9.seqid 2024-12-16T21:02:33,405 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04/recovered.edits/9.seqid 2024-12-16T21:02:33,405 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/a85dc09c32e23ad6867b018fba70db05 2024-12-16T21:02:33,405 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithMergeRegion/84bea62b79efa5fd6cbb8c34a4272c04 2024-12-16T21:02:33,405 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-16T21:02:33,407 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=135, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,410 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-16T21:02:33,412 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-16T21:02:33,412 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=135, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,412 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-16T21:02:33,413 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382953412"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:33,413 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382953412"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:33,414 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T21:02:33,414 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 84bea62b79efa5fd6cbb8c34a4272c04, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734382933798.84bea62b79efa5fd6cbb8c34a4272c04.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a85dc09c32e23ad6867b018fba70db05, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734382933798.a85dc09c32e23ad6867b018fba70db05.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T21:02:33,414 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
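Once the tables are dropped, the test deletes its snapshots as well (the MasterRpcServices "delete name: ..." and SnapshotManager "Deleting snapshot: ..." entries just below). A minimal sketch of the equivalent Admin calls, assuming a standard client connection:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Each call corresponds to one "Deleting snapshot: ..." entry on the master.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
    }
  }
}
```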
2024-12-16T21:02:33,414 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734382953414"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:33,415 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-16T21:02:33,418 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=135, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,418 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 32 msec 2024-12-16T21:02:33,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-16T21:02:33,502 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 135 completed 2024-12-16T21:02:33,512 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-16T21:02:33,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,515 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-16T21:02:33,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:33,518 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" 2024-12-16T21:02:33,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:33,540 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=808 (was 800) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:40841 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:51848 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-32 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:58230 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/6b259cf15929:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-33 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1557483669_1 at /127.0.0.1:58216 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-29 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40841 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:40833 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:50872 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 3641) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-31 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/6b259cf15929:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-28 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-30 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/6b259cf15929:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4735 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1557483669_1 at /127.0.0.1:51836 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=821 (was 815) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=453 (was 451) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 17), AvailableMemoryMB=3304 (was 3557) 2024-12-16T21:02:33,540 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-12-16T21:02:33,557 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=808, OpenFileDescriptor=821, MaxFileDescriptor=1048576, SystemLoadAverage=453, ProcessCount=17, AvailableMemoryMB=3303 2024-12-16T21:02:33,557 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-12-16T21:02:33,558 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:02:33,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T21:02:33,559 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:02:33,560 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:33,560 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:02:33,560 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 136 2024-12-16T21:02:33,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T21:02:33,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742164_1340 (size=407) 2024-12-16T21:02:33,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742164_1340 (size=407) 2024-12-16T21:02:33,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742164_1340 (size=407) 2024-12-16T21:02:33,568 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => fef0cc17f37d2f685fa3edfbbb56465a, NAME => 'testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:33,569 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 6bd63f0b66fb54e0b600984f1e422b1d, NAME => 'testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:33,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742165_1341 (size=68) 2024-12-16T21:02:33,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742165_1341 (size=68) 2024-12-16T21:02:33,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742165_1341 (size=68) 2024-12-16T21:02:33,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742166_1342 (size=68) 2024-12-16T21:02:33,576 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:33,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742166_1342 (size=68) 2024-12-16T21:02:33,576 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 6bd63f0b66fb54e0b600984f1e422b1d, disabling compactions & flushes 2024-12-16T21:02:33,576 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:33,576 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:33,576 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 
after waiting 0 ms 2024-12-16T21:02:33,576 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:33,576 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:33,576 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 6bd63f0b66fb54e0b600984f1e422b1d: 2024-12-16T21:02:33,577 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:33,577 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing fef0cc17f37d2f685fa3edfbbb56465a, disabling compactions & flushes 2024-12-16T21:02:33,577 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 2024-12-16T21:02:33,577 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 2024-12-16T21:02:33,577 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. after waiting 0 ms 2024-12-16T21:02:33,577 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 2024-12-16T21:02:33,577 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 
2024-12-16T21:02:33,577 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for fef0cc17f37d2f685fa3edfbbb56465a: 2024-12-16T21:02:33,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742166_1342 (size=68) 2024-12-16T21:02:33,577 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:02:33,578 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734382953578"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382953578"}]},"ts":"1734382953578"} 2024-12-16T21:02:33,578 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734382953578"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382953578"}]},"ts":"1734382953578"} 2024-12-16T21:02:33,579 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T21:02:33,580 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:02:33,580 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382953580"}]},"ts":"1734382953580"} 2024-12-16T21:02:33,581 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-16T21:02:33,600 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:02:33,601 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:02:33,601 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:02:33,601 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:02:33,601 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:02:33,601 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:02:33,601 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:02:33,601 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:02:33,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fef0cc17f37d2f685fa3edfbbb56465a, ASSIGN}, {pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=6bd63f0b66fb54e0b600984f1e422b1d, ASSIGN}] 2024-12-16T21:02:33,602 INFO [PEWorker-1 
{}] procedure.MasterProcedureScheduler(786): Took xlock for pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fef0cc17f37d2f685fa3edfbbb56465a, ASSIGN 2024-12-16T21:02:33,602 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=6bd63f0b66fb54e0b600984f1e422b1d, ASSIGN 2024-12-16T21:02:33,603 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fef0cc17f37d2f685fa3edfbbb56465a, ASSIGN; state=OFFLINE, location=6b259cf15929,41713,1734382821274; forceNewPlan=false, retain=false 2024-12-16T21:02:33,603 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=6bd63f0b66fb54e0b600984f1e422b1d, ASSIGN; state=OFFLINE, location=6b259cf15929,42323,1734382821139; forceNewPlan=false, retain=false 2024-12-16T21:02:33,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T21:02:33,753 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T21:02:33,754 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=fef0cc17f37d2f685fa3edfbbb56465a, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:33,754 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=6bd63f0b66fb54e0b600984f1e422b1d, regionState=OPENING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:33,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; OpenRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:02:33,756 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=137, state=RUNNABLE; OpenRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:02:33,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T21:02:33,907 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:33,907 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:33,909 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:33,909 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 
2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7285): Opening region: {ENCODED => fef0cc17f37d2f685fa3edfbbb56465a, NAME => 'testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7285): Opening region: {ENCODED => 6bd63f0b66fb54e0b600984f1e422b1d, NAME => 'testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. service=AccessControlService 2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. service=AccessControlService 2024-12-16T21:02:33,910 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:02:33,910 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7327): checking encryption for fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7327): checking encryption for 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] 
regionserver.HRegion(7330): checking classloading for fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:33,910 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7330): checking classloading for 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:33,911 INFO [StoreOpener-6bd63f0b66fb54e0b600984f1e422b1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:33,911 INFO [StoreOpener-fef0cc17f37d2f685fa3edfbbb56465a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:33,913 INFO [StoreOpener-6bd63f0b66fb54e0b600984f1e422b1d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6bd63f0b66fb54e0b600984f1e422b1d columnFamilyName cf 2024-12-16T21:02:33,913 INFO [StoreOpener-fef0cc17f37d2f685fa3edfbbb56465a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fef0cc17f37d2f685fa3edfbbb56465a columnFamilyName cf 2024-12-16T21:02:33,913 DEBUG [StoreOpener-6bd63f0b66fb54e0b600984f1e422b1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:33,913 DEBUG [StoreOpener-fef0cc17f37d2f685fa3edfbbb56465a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:33,913 INFO [StoreOpener-fef0cc17f37d2f685fa3edfbbb56465a-1 {}] regionserver.HStore(327): Store=fef0cc17f37d2f685fa3edfbbb56465a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:33,913 INFO [StoreOpener-6bd63f0b66fb54e0b600984f1e422b1d-1 {}] regionserver.HStore(327): Store=6bd63f0b66fb54e0b600984f1e422b1d/cf, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:33,914 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:33,914 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:33,914 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:33,914 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:33,916 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1085): writing seq id for 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:33,916 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1085): writing seq id for fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:33,918 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:33,918 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:33,918 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1102): Opened 6bd63f0b66fb54e0b600984f1e422b1d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65571176, jitterRate=-0.022913336753845215}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:33,918 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1102): Opened fef0cc17f37d2f685fa3edfbbb56465a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75010936, jitterRate=0.11775004863739014}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:33,919 DEBUG 
[RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1001): Region open journal for 6bd63f0b66fb54e0b600984f1e422b1d: 2024-12-16T21:02:33,919 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1001): Region open journal for fef0cc17f37d2f685fa3edfbbb56465a: 2024-12-16T21:02:33,920 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a., pid=140, masterSystemTime=1734382953907 2024-12-16T21:02:33,920 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d., pid=139, masterSystemTime=1734382953907 2024-12-16T21:02:33,921 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:33,921 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:33,922 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=6bd63f0b66fb54e0b600984f1e422b1d, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:33,922 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 2024-12-16T21:02:33,922 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 
2024-12-16T21:02:33,922 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=fef0cc17f37d2f685fa3edfbbb56465a, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:33,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-16T21:02:33,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; OpenRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d, server=6b259cf15929,42323,1734382821139 in 168 msec 2024-12-16T21:02:33,925 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=137 2024-12-16T21:02:33,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=6bd63f0b66fb54e0b600984f1e422b1d, ASSIGN in 323 msec 2024-12-16T21:02:33,926 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=137, state=SUCCESS; OpenRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a, server=6b259cf15929,41713,1734382821274 in 168 msec 2024-12-16T21:02:33,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-16T21:02:33,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fef0cc17f37d2f685fa3edfbbb56465a, ASSIGN in 324 msec 2024-12-16T21:02:33,927 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:02:33,927 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382953927"}]},"ts":"1734382953927"} 2024-12-16T21:02:33,928 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-16T21:02:33,930 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:02:33,931 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-16T21:02:33,932 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-16T21:02:33,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:33,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:33,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:33,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:33,936 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:33,936 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:33,936 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:33,937 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:33,938 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 378 msec 2024-12-16T21:02:34,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T21:02:34,165 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 136 completed 2024-12-16T21:02:34,165 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-16T21:02:34,165 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:34,168 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-16T21:02:34,169 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:34,169 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-16T21:02:34,172 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-16T21:02:34,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382954172 (current time:1734382954172). 
2024-12-16T21:02:34,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:02:34,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-16T21:02:34,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:02:34,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2a18918e to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65909de6 2024-12-16T21:02:34,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46a8a51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:34,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:34,181 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37888, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:34,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2a18918e to 127.0.0.1:61742 2024-12-16T21:02:34,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:34,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x25c35179 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44dd878a 2024-12-16T21:02:34,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1435b542, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:34,191 DEBUG [hconnection-0x16e55010-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:34,192 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:34,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:34,194 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41052, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:34,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x25c35179 to 127.0.0.1:61742 2024-12-16T21:02:34,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:34,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-16T21:02:34,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:02:34,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-16T21:02:34,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-16T21:02:34,198 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:02:34,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-16T21:02:34,199 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:02:34,201 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:02:34,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742167_1343 (size=170) 2024-12-16T21:02:34,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742167_1343 (size=170) 2024-12-16T21:02:34,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742167_1343 (size=170) 2024-12-16T21:02:34,210 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:02:34,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a}, {pid=143, ppid=141, state=RUNNABLE; 
SnapshotRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d}] 2024-12-16T21:02:34,211 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:34,211 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:34,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-16T21:02:34,362 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:34,362 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:34,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-16T21:02:34,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42323 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=143 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2538): Flush status journal for fef0cc17f37d2f685fa3edfbbb56465a: 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 6bd63f0b66fb54e0b600984f1e422b1d: 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:02:34,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:02:34,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742169_1345 (size=71) 2024-12-16T21:02:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742168_1344 (size=71) 2024-12-16T21:02:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742169_1345 (size=71) 2024-12-16T21:02:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742168_1344 (size=71) 2024-12-16T21:02:34,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742168_1344 (size=71) 2024-12-16T21:02:34,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742169_1345 (size=71) 2024-12-16T21:02:34,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 2024-12-16T21:02:34,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 
2024-12-16T21:02:34,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-16T21:02:34,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-16T21:02:34,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-16T21:02:34,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=142 2024-12-16T21:02:34,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:34,371 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:34,371 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:34,371 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:34,373 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=141, state=SUCCESS; SnapshotRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d in 162 msec 2024-12-16T21:02:34,374 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-16T21:02:34,374 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; SnapshotRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a in 162 msec 2024-12-16T21:02:34,374 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:02:34,375 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:02:34,375 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:02:34,375 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,376 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742170_1346 (size=552) 2024-12-16T21:02:34,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742170_1346 (size=552) 2024-12-16T21:02:34,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742170_1346 (size=552) 2024-12-16T21:02:34,385 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:02:34,389 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:02:34,389 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,390 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:02:34,390 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-16T21:02:34,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 194 msec 2024-12-16T21:02:34,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-16T21:02:34,500 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 141 completed 2024-12-16T21:02:34,507 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41713 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-16T21:02:34,509 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42323 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:02:34,512 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-16T21:02:34,512 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 2024-12-16T21:02:34,512 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:34,521 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-16T21:02:34,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382954521 (current time:1734382954521). 2024-12-16T21:02:34,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:02:34,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-16T21:02:34,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:02:34,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2a359623 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36e1f26b 2024-12-16T21:02:34,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bd61e0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:34,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:34,528 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37896, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:34,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2a359623 to 127.0.0.1:61742 2024-12-16T21:02:34,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:34,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x03bd82cc to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a765a7a 2024-12-16T21:02:34,533 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@412b1546, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:34,534 DEBUG [hconnection-0x382b5c9d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:34,535 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37912, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:34,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:34,537 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41060, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:34,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x03bd82cc to 127.0.0.1:61742 2024-12-16T21:02:34,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:34,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-16T21:02:34,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-16T21:02:34,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-16T21:02:34,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-16T21:02:34,540 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:02:34,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-16T21:02:34,541 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:02:34,542 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:02:34,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742171_1347 (size=165) 2024-12-16T21:02:34,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742171_1347 (size=165) 2024-12-16T21:02:34,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742171_1347 (size=165) 2024-12-16T21:02:34,556 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:02:34,556 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a}, {pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d}] 2024-12-16T21:02:34,556 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:34,557 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:34,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=144 2024-12-16T21:02:34,707 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:34,707 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:34,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=145 2024-12-16T21:02:34,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42323 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=146 2024-12-16T21:02:34,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 2024-12-16T21:02:34,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:34,708 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing fef0cc17f37d2f685fa3edfbbb56465a 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-16T21:02:34,709 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing 6bd63f0b66fb54e0b600984f1e422b1d 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-16T21:02:34,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/.tmp/cf/5fc342f9620049878af1513df1c52745 is 71, key is 03451f8d20530593ffb6bf30c1363d90/cf:q/1734382954507/Put/seqid=0 2024-12-16T21:02:34,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/.tmp/cf/0296fb278da348a89727581e963d4d05 is 71, key is 10a74c2d16567cefa40d2fe99cd064be/cf:q/1734382954509/Put/seqid=0 2024-12-16T21:02:34,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742172_1348 (size=5422) 2024-12-16T21:02:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742172_1348 (size=5422) 2024-12-16T21:02:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742172_1348 (size=5422) 2024-12-16T21:02:34,753 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/.tmp/cf/5fc342f9620049878af1513df1c52745 2024-12-16T21:02:34,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742173_1349 (size=8188) 2024-12-16T21:02:34,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742173_1349 (size=8188) 2024-12-16T21:02:34,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742173_1349 (size=8188) 2024-12-16T21:02:34,755 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/.tmp/cf/0296fb278da348a89727581e963d4d05 2024-12-16T21:02:34,759 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/.tmp/cf/5fc342f9620049878af1513df1c52745 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/cf/5fc342f9620049878af1513df1c52745 2024-12-16T21:02:34,759 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/.tmp/cf/0296fb278da348a89727581e963d4d05 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/cf/0296fb278da348a89727581e963d4d05 2024-12-16T21:02:34,764 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/cf/0296fb278da348a89727581e963d4d05, entries=45, sequenceid=6, filesize=8.0 K 2024-12-16T21:02:34,764 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/cf/5fc342f9620049878af1513df1c52745, entries=5, sequenceid=6, filesize=5.3 K 2024-12-16T21:02:34,765 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 6bd63f0b66fb54e0b600984f1e422b1d in 57ms, sequenceid=6, compaction requested=false 2024-12-16T21:02:34,765 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for fef0cc17f37d2f685fa3edfbbb56465a in 57ms, sequenceid=6, compaction requested=false 2024-12-16T21:02:34,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-16T21:02:34,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-16T21:02:34,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for fef0cc17f37d2f685fa3edfbbb56465a: 2024-12-16T21:02:34,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for 6bd63f0b66fb54e0b600984f1e422b1d: 2024-12-16T21:02:34,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. for snaptb0-testExportExpiredSnapshot completed. 2024-12-16T21:02:34,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. for snaptb0-testExportExpiredSnapshot completed. 2024-12-16T21:02:34,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:34,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/cf/0296fb278da348a89727581e963d4d05] hfiles 2024-12-16T21:02:34,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/cf/0296fb278da348a89727581e963d4d05 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:34,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/cf/5fc342f9620049878af1513df1c52745] hfiles 2024-12-16T21:02:34,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/cf/5fc342f9620049878af1513df1c52745 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742174_1350 (size=110) 2024-12-16T21:02:34,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742174_1350 (size=110) 2024-12-16T21:02:34,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742174_1350 (size=110) 2024-12-16T21:02:34,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 
2024-12-16T21:02:34,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-16T21:02:34,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-16T21:02:34,775 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:34,775 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:34,777 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; SnapshotRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a in 220 msec 2024-12-16T21:02:34,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742175_1351 (size=110) 2024-12-16T21:02:34,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742175_1351 (size=110) 2024-12-16T21:02:34,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742175_1351 (size=110) 2024-12-16T21:02:34,780 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 
2024-12-16T21:02:34,780 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146 2024-12-16T21:02:34,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=146 2024-12-16T21:02:34,781 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:34,781 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:34,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=144 2024-12-16T21:02:34,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=144, state=SUCCESS; SnapshotRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d in 225 msec 2024-12-16T21:02:34,783 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:02:34,783 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:02:34,784 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:02:34,784 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,784 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742176_1352 (size=630) 2024-12-16T21:02:34,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742176_1352 (size=630) 2024-12-16T21:02:34,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742176_1352 (size=630) 2024-12-16T21:02:34,794 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:02:34,798 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:02:34,798 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-16T21:02:34,799 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:02:34,799 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-16T21:02:34,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 261 msec 2024-12-16T21:02:34,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-16T21:02:34,842 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 144 completed 2024-12-16T21:02:34,843 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:02:34,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-16T21:02:34,844 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:02:34,844 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:34,844 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 147 2024-12-16T21:02:34,845 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=147, 
state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:02:34,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-16T21:02:34,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742177_1353 (size=400) 2024-12-16T21:02:34,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742177_1353 (size=400) 2024-12-16T21:02:34,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742177_1353 (size=400) 2024-12-16T21:02:34,852 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5700cdf8b31b01a35a4ac5bf632afccd, NAME => 'testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:34,853 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 3f3089e09ebc056cacee8e9b5d95cda7, NAME => 'testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:34,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742179_1355 (size=61) 2024-12-16T21:02:34,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742178_1354 (size=61) 2024-12-16T21:02:34,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742179_1355 (size=61) 2024-12-16T21:02:34,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742178_1354 (size=61) 2024-12-16T21:02:34,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742179_1355 (size=61) 2024-12-16T21:02:34,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added 
to blk_1073742178_1354 (size=61) 2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing 5700cdf8b31b01a35a4ac5bf632afccd, disabling compactions & flushes 2024-12-16T21:02:34,860 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 3f3089e09ebc056cacee8e9b5d95cda7, disabling compactions & flushes 2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. after waiting 0 ms 2024-12-16T21:02:34,860 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 2024-12-16T21:02:34,860 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. after waiting 0 ms 2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5700cdf8b31b01a35a4ac5bf632afccd: 2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 2024-12-16T21:02:34,860 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 
2024-12-16T21:02:34,860 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 3f3089e09ebc056cacee8e9b5d95cda7: 2024-12-16T21:02:34,861 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:02:34,861 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734382954861"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382954861"}]},"ts":"1734382954861"} 2024-12-16T21:02:34,861 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734382954861"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382954861"}]},"ts":"1734382954861"} 2024-12-16T21:02:34,863 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T21:02:34,863 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:02:34,864 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382954863"}]},"ts":"1734382954863"} 2024-12-16T21:02:34,865 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-16T21:02:34,869 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:02:34,870 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:02:34,870 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:02:34,870 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:02:34,870 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:02:34,870 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:02:34,870 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:02:34,870 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:02:34,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5700cdf8b31b01a35a4ac5bf632afccd, ASSIGN}, {pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3f3089e09ebc056cacee8e9b5d95cda7, ASSIGN}] 2024-12-16T21:02:34,871 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, 
region=3f3089e09ebc056cacee8e9b5d95cda7, ASSIGN 2024-12-16T21:02:34,871 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5700cdf8b31b01a35a4ac5bf632afccd, ASSIGN 2024-12-16T21:02:34,871 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3f3089e09ebc056cacee8e9b5d95cda7, ASSIGN; state=OFFLINE, location=6b259cf15929,41713,1734382821274; forceNewPlan=false, retain=false 2024-12-16T21:02:34,871 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5700cdf8b31b01a35a4ac5bf632afccd, ASSIGN; state=OFFLINE, location=6b259cf15929,42323,1734382821139; forceNewPlan=false, retain=false 2024-12-16T21:02:34,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-16T21:02:35,022 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T21:02:35,022 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=3f3089e09ebc056cacee8e9b5d95cda7, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:35,022 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=5700cdf8b31b01a35a4ac5bf632afccd, regionState=OPENING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:35,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; OpenRegionProcedure 3f3089e09ebc056cacee8e9b5d95cda7, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:02:35,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=148, state=RUNNABLE; OpenRegionProcedure 5700cdf8b31b01a35a4ac5bf632afccd, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:02:35,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-16T21:02:35,175 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:35,175 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:35,178 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:02:35,178 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 
2024-12-16T21:02:35,178 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 5700cdf8b31b01a35a4ac5bf632afccd, NAME => 'testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T21:02:35,178 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => 3f3089e09ebc056cacee8e9b5d95cda7, NAME => 'testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T21:02:35,179 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. service=AccessControlService 2024-12-16T21:02:35,179 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. service=AccessControlService 2024-12-16T21:02:35,179 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:02:35,179 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:02:35,179 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:02:35,179 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 5700cdf8b31b01a35a4ac5bf632afccd 2024-12-16T21:02:35,179 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:35,179 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:35,179 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for 3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:02:35,179 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 5700cdf8b31b01a35a4ac5bf632afccd 2024-12-16T21:02:35,179 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for 
3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:02:35,179 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 5700cdf8b31b01a35a4ac5bf632afccd 2024-12-16T21:02:35,180 INFO [StoreOpener-5700cdf8b31b01a35a4ac5bf632afccd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5700cdf8b31b01a35a4ac5bf632afccd 2024-12-16T21:02:35,180 INFO [StoreOpener-3f3089e09ebc056cacee8e9b5d95cda7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:02:35,181 INFO [StoreOpener-3f3089e09ebc056cacee8e9b5d95cda7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f3089e09ebc056cacee8e9b5d95cda7 columnFamilyName cf 2024-12-16T21:02:35,181 DEBUG [StoreOpener-3f3089e09ebc056cacee8e9b5d95cda7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:35,181 INFO [StoreOpener-5700cdf8b31b01a35a4ac5bf632afccd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5700cdf8b31b01a35a4ac5bf632afccd columnFamilyName cf 2024-12-16T21:02:35,181 DEBUG [StoreOpener-5700cdf8b31b01a35a4ac5bf632afccd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:35,182 INFO [StoreOpener-5700cdf8b31b01a35a4ac5bf632afccd-1 {}] regionserver.HStore(327): Store=5700cdf8b31b01a35a4ac5bf632afccd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:35,182 INFO [StoreOpener-3f3089e09ebc056cacee8e9b5d95cda7-1 {}] regionserver.HStore(327): Store=3f3089e09ebc056cacee8e9b5d95cda7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:35,183 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:02:35,183 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/5700cdf8b31b01a35a4ac5bf632afccd 2024-12-16T21:02:35,183 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/5700cdf8b31b01a35a4ac5bf632afccd 2024-12-16T21:02:35,183 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:02:35,185 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 5700cdf8b31b01a35a4ac5bf632afccd 2024-12-16T21:02:35,185 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1085): writing seq id for 3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:02:35,186 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/3f3089e09ebc056cacee8e9b5d95cda7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:35,186 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/5700cdf8b31b01a35a4ac5bf632afccd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:35,187 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened 3f3089e09ebc056cacee8e9b5d95cda7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73323388, jitterRate=0.09260362386703491}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:35,187 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 5700cdf8b31b01a35a4ac5bf632afccd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74098359, jitterRate=0.10415159165859222}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:35,187 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 
5700cdf8b31b01a35a4ac5bf632afccd: 2024-12-16T21:02:35,187 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for 3f3089e09ebc056cacee8e9b5d95cda7: 2024-12-16T21:02:35,188 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd., pid=151, masterSystemTime=1734382955175 2024-12-16T21:02:35,188 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7., pid=150, masterSystemTime=1734382955175 2024-12-16T21:02:35,189 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:02:35,189 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:02:35,190 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=5700cdf8b31b01a35a4ac5bf632afccd, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:35,190 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 2024-12-16T21:02:35,190 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 
2024-12-16T21:02:35,190 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=3f3089e09ebc056cacee8e9b5d95cda7, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:35,192 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=148 2024-12-16T21:02:35,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=148, state=SUCCESS; OpenRegionProcedure 5700cdf8b31b01a35a4ac5bf632afccd, server=6b259cf15929,42323,1734382821139 in 167 msec 2024-12-16T21:02:35,193 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-16T21:02:35,193 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; OpenRegionProcedure 3f3089e09ebc056cacee8e9b5d95cda7, server=6b259cf15929,41713,1734382821274 in 169 msec 2024-12-16T21:02:35,194 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5700cdf8b31b01a35a4ac5bf632afccd, ASSIGN in 323 msec 2024-12-16T21:02:35,195 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=147 2024-12-16T21:02:35,195 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3f3089e09ebc056cacee8e9b5d95cda7, ASSIGN in 323 msec 2024-12-16T21:02:35,195 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:02:35,196 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382955195"}]},"ts":"1734382955195"} 2024-12-16T21:02:35,196 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-16T21:02:35,199 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:02:35,199 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-16T21:02:35,201 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-16T21:02:35,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:35,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:35,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:35,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:35,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:35,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:35,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:35,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:35,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:35,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:35,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:35,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:35,206 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=testExportExpiredSnapshot in 361 msec 2024-12-16T21:02:35,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-16T21:02:35,448 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportExpiredSnapshot, procId: 147 completed 2024-12-16T21:02:35,448 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportExpiredSnapshot get assigned. 
Timeout = 60000ms 2024-12-16T21:02:35,448 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:35,451 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-16T21:02:35,451 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:35,451 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportExpiredSnapshot assigned. 2024-12-16T21:02:35,457 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42323 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:02:35,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41713 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:02:35,464 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportExpiredSnapshot 2024-12-16T21:02:35,464 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:02:35,464 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:35,471 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-16T21:02:35,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-16T21:02:35,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:02:35,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0ba22758 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4513a6f8 2024-12-16T21:02:35,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@579a3d62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:35,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:35,477 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37924, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:35,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0ba22758 to 127.0.0.1:61742 2024-12-16T21:02:35,478 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:35,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x562a0382 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@ed65bd9 2024-12-16T21:02:35,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@821064c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:35,483 DEBUG [hconnection-0x72bb7c5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:35,484 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37938, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:35,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:35,485 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41068, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:35,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x562a0382 to 127.0.0.1:61742 2024-12-16T21:02:35,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:35,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-16T21:02:35,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-16T21:02:35,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-16T21:02:35,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-16T21:02:35,488 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:02:35,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-16T21:02:35,489 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:02:35,490 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:02:35,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742180_1356 (size=152) 2024-12-16T21:02:35,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742180_1356 (size=152) 2024-12-16T21:02:35,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742180_1356 (size=152) 2024-12-16T21:02:35,496 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:02:35,497 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 5700cdf8b31b01a35a4ac5bf632afccd}, {pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 3f3089e09ebc056cacee8e9b5d95cda7}] 2024-12-16T21:02:35,497 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:02:35,497 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 5700cdf8b31b01a35a4ac5bf632afccd 2024-12-16T21:02:35,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=152 2024-12-16T21:02:35,648 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:35,648 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:35,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42323 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=153 2024-12-16T21:02:35,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=154 2024-12-16T21:02:35,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:02:35,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 2024-12-16T21:02:35,649 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing 5700cdf8b31b01a35a4ac5bf632afccd 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-16T21:02:35,649 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 3f3089e09ebc056cacee8e9b5d95cda7 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-16T21:02:35,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/3f3089e09ebc056cacee8e9b5d95cda7/.tmp/cf/f3f69778b68e40f78c3200eac4927755 is 71, key is 12a9a7d7d49e88137d4bd7b23ca379e1/cf:q/1734382955462/Put/seqid=0 2024-12-16T21:02:35,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/5700cdf8b31b01a35a4ac5bf632afccd/.tmp/cf/66a61d29fc5248dc86722c2686cdab92 is 71, key is 0e9bbff051efc7be13890ed88b37e109/cf:q/1734382955456/Put/seqid=0 2024-12-16T21:02:35,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742182_1358 (size=5216) 2024-12-16T21:02:35,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742182_1358 (size=5216) 2024-12-16T21:02:35,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742181_1357 (size=8392) 2024-12-16T21:02:35,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742182_1358 (size=5216) 2024-12-16T21:02:35,674 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, 
pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/5700cdf8b31b01a35a4ac5bf632afccd/.tmp/cf/66a61d29fc5248dc86722c2686cdab92 2024-12-16T21:02:35,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742181_1357 (size=8392) 2024-12-16T21:02:35,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742181_1357 (size=8392) 2024-12-16T21:02:35,675 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/3f3089e09ebc056cacee8e9b5d95cda7/.tmp/cf/f3f69778b68e40f78c3200eac4927755 2024-12-16T21:02:35,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/5700cdf8b31b01a35a4ac5bf632afccd/.tmp/cf/66a61d29fc5248dc86722c2686cdab92 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/5700cdf8b31b01a35a4ac5bf632afccd/cf/66a61d29fc5248dc86722c2686cdab92 2024-12-16T21:02:35,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/3f3089e09ebc056cacee8e9b5d95cda7/.tmp/cf/f3f69778b68e40f78c3200eac4927755 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/3f3089e09ebc056cacee8e9b5d95cda7/cf/f3f69778b68e40f78c3200eac4927755 2024-12-16T21:02:35,684 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/5700cdf8b31b01a35a4ac5bf632afccd/cf/66a61d29fc5248dc86722c2686cdab92, entries=2, sequenceid=5, filesize=5.1 K 2024-12-16T21:02:35,685 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 5700cdf8b31b01a35a4ac5bf632afccd in 36ms, sequenceid=5, compaction requested=false 2024-12-16T21:02:35,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-16T21:02:35,685 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/3f3089e09ebc056cacee8e9b5d95cda7/cf/f3f69778b68e40f78c3200eac4927755, entries=48, sequenceid=5, filesize=8.2 K 2024-12-16T21:02:35,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for 5700cdf8b31b01a35a4ac5bf632afccd: 2024-12-16T21:02:35,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. for snapshot-testExportExpiredSnapshot completed. 2024-12-16T21:02:35,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-16T21:02:35,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:35,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/5700cdf8b31b01a35a4ac5bf632afccd/cf/66a61d29fc5248dc86722c2686cdab92] hfiles 2024-12-16T21:02:35,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/5700cdf8b31b01a35a4ac5bf632afccd/cf/66a61d29fc5248dc86722c2686cdab92 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-16T21:02:35,686 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 3f3089e09ebc056cacee8e9b5d95cda7 in 37ms, sequenceid=5, compaction requested=false 2024-12-16T21:02:35,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 3f3089e09ebc056cacee8e9b5d95cda7: 2024-12-16T21:02:35,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. for snapshot-testExportExpiredSnapshot completed. 2024-12-16T21:02:35,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-16T21:02:35,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:35,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/3f3089e09ebc056cacee8e9b5d95cda7/cf/f3f69778b68e40f78c3200eac4927755] hfiles 2024-12-16T21:02:35,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/3f3089e09ebc056cacee8e9b5d95cda7/cf/f3f69778b68e40f78c3200eac4927755 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-16T21:02:35,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742184_1360 (size=103) 2024-12-16T21:02:35,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742184_1360 (size=103) 2024-12-16T21:02:35,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742183_1359 (size=103) 2024-12-16T21:02:35,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742183_1359 (size=103) 2024-12-16T21:02:35,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742183_1359 (size=103) 2024-12-16T21:02:35,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742184_1360 (size=103) 2024-12-16T21:02:35,700 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:02:35,700 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-16T21:02:35,700 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 
2024-12-16T21:02:35,700 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-16T21:02:35,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-16T21:02:35,700 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 5700cdf8b31b01a35a4ac5bf632afccd 2024-12-16T21:02:35,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-16T21:02:35,701 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:02:35,701 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:02:35,701 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 5700cdf8b31b01a35a4ac5bf632afccd 2024-12-16T21:02:35,704 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; SnapshotRegionProcedure 5700cdf8b31b01a35a4ac5bf632afccd in 207 msec 2024-12-16T21:02:35,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=152 2024-12-16T21:02:35,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; SnapshotRegionProcedure 3f3089e09ebc056cacee8e9b5d95cda7 in 207 msec 2024-12-16T21:02:35,705 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:02:35,706 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:02:35,706 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:02:35,706 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-16T21:02:35,707 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-16T21:02:35,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742185_1361 (size=609) 2024-12-16T21:02:35,715 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742185_1361 (size=609) 2024-12-16T21:02:35,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742185_1361 (size=609) 2024-12-16T21:02:35,717 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:02:35,721 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:02:35,722 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-16T21:02:35,726 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:02:35,727 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-16T21:02:35,728 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 240 msec 2024-12-16T21:02:35,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-16T21:02:35,790 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot, procId: 152 completed 2024-12-16T21:02:37,655 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0006_000001 (auth:SIMPLE) from 127.0.0.1:39564 2024-12-16T21:02:37,666 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_0/usercache/jenkins/appcache/application_1734382828217_0006/container_1734382828217_0006_01_000001/launch_container.sh] 2024-12-16T21:02:37,666 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_0/usercache/jenkins/appcache/application_1734382828217_0006/container_1734382828217_0006_01_000001/container_tokens] 2024-12-16T21:02:37,666 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_0/usercache/jenkins/appcache/application_1734382828217_0006/container_1734382828217_0006_01_000001/sysfs] 2024-12-16T21:02:38,352 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:02:40,871 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-16T21:02:40,871 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-16T21:02:40,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-16T21:02:40,872 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-16T21:02:40,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T21:02:40,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T21:02:45,798 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382965798 2024-12-16T21:02:45,798 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37263, tgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382965798, rawTgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382965798, srcFsUri=hdfs://localhost:37263, srcDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:45,826 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37263, inputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:45,826 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, 
ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382965798, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382965798/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-16T21:02:45,829 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T21:02:45,829 ERROR [Time-limited test {}] util.AbstractHBaseTool(153): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:948) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1093) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:315) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T21:02:45,830 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportExpiredSnapshot 2024-12-16T21:02:45,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-16T21:02:45,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T21:02:45,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T21:02:45,833 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382965833"}]},"ts":"1734382965833"} 2024-12-16T21:02:45,834 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-16T21:02:45,836 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-16T21:02:45,837 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-16T21:02:45,838 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fef0cc17f37d2f685fa3edfbbb56465a, UNASSIGN}, {pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=6bd63f0b66fb54e0b600984f1e422b1d, UNASSIGN}] 2024-12-16T21:02:45,838 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=6bd63f0b66fb54e0b600984f1e422b1d, UNASSIGN 2024-12-16T21:02:45,838 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fef0cc17f37d2f685fa3edfbbb56465a, UNASSIGN 2024-12-16T21:02:45,839 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=6bd63f0b66fb54e0b600984f1e422b1d, regionState=CLOSING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:45,839 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=fef0cc17f37d2f685fa3edfbbb56465a, regionState=CLOSING, 
regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:45,840 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:02:45,840 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; CloseRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:02:45,840 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:02:45,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=157, state=RUNNABLE; CloseRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:02:45,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T21:02:45,991 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:45,991 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:45,992 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(124): Close fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:45,992 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:45,992 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:02:45,992 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:02:45,992 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1681): Closing fef0cc17f37d2f685fa3edfbbb56465a, disabling compactions & flushes 2024-12-16T21:02:45,992 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing 6bd63f0b66fb54e0b600984f1e422b1d, disabling compactions & flushes 2024-12-16T21:02:45,992 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 2024-12-16T21:02:45,992 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:45,992 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 
2024-12-16T21:02:45,992 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:45,992 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. after waiting 0 ms 2024-12-16T21:02:45,992 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. after waiting 0 ms 2024-12-16T21:02:45,992 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:45,992 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 2024-12-16T21:02:45,996 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:02:45,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:02:45,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:02:45,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:02:45,997 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d. 2024-12-16T21:02:45,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for 6bd63f0b66fb54e0b600984f1e422b1d: 2024-12-16T21:02:45,997 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a. 
2024-12-16T21:02:45,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1635): Region close journal for fef0cc17f37d2f685fa3edfbbb56465a: 2024-12-16T21:02:45,998 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed 6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:45,999 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=6bd63f0b66fb54e0b600984f1e422b1d, regionState=CLOSED 2024-12-16T21:02:45,999 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(170): Closed fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:45,999 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=fef0cc17f37d2f685fa3edfbbb56465a, regionState=CLOSED 2024-12-16T21:02:46,001 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-16T21:02:46,001 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseRegionProcedure 6bd63f0b66fb54e0b600984f1e422b1d, server=6b259cf15929,42323,1734382821139 in 160 msec 2024-12-16T21:02:46,002 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=157 2024-12-16T21:02:46,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=6bd63f0b66fb54e0b600984f1e422b1d, UNASSIGN in 164 msec 2024-12-16T21:02:46,002 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=157, state=SUCCESS; CloseRegionProcedure fef0cc17f37d2f685fa3edfbbb56465a, server=6b259cf15929,41713,1734382821274 in 160 msec 2024-12-16T21:02:46,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-16T21:02:46,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fef0cc17f37d2f685fa3edfbbb56465a, UNASSIGN in 165 msec 2024-12-16T21:02:46,004 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-16T21:02:46,004 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 166 msec 2024-12-16T21:02:46,005 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382966005"}]},"ts":"1734382966005"} 2024-12-16T21:02:46,006 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-16T21:02:46,008 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-16T21:02:46,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 178 msec 2024-12-16T21:02:46,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 
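[Note] The ERROR above is the ExportSnapshot tool rejecting a snapshot whose TTL has lapsed: the stack trace shows ToolRunner.run driving AbstractHBaseTool.run into ExportSnapshot.doWork, which verifies the snapshot before copying anything and throws SnapshotTTLExpiredException. A minimal sketch of an equivalent invocation through ToolRunner, assuming the documented -snapshot/-copy-to options and a hypothetical destination URI (neither taken from this run):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    // The tool verifies the snapshot's expiration status and integrity before any copy starts;
    // an expired snapshot fails fast with SnapshotTTLExpiredException, as in the ERROR above.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportExpiredSnapshot",
        "-copy-to", "hdfs://namenode:8020/export-test" // hypothetical destination, not the path from this run
    });
    System.exit(exitCode);
  }
}
```

The harness frame in the stack trace (TestExportSnapshot.runExportSnapshot) wraps an equivalent invocation; the entries that follow are the test tearing the table down afterwards.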
2024-12-16T21:02:46,135 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 155 completed 2024-12-16T21:02:46,135 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,137 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,137 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,138 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,140 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:46,140 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:46,142 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/recovered.edits] 2024-12-16T21:02:46,142 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/recovered.edits] 2024-12-16T21:02:46,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,143 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,144 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-16T21:02:46,144 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-16T21:02:46,144 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-16T21:02:46,144 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-16T21:02:46,146 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/cf/5fc342f9620049878af1513df1c52745 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/cf/5fc342f9620049878af1513df1c52745 2024-12-16T21:02:46,146 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/cf/0296fb278da348a89727581e963d4d05 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/cf/0296fb278da348a89727581e963d4d05 2024-12-16T21:02:46,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:46,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:46,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:46,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:46,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-16T21:02:46,149 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a/recovered.edits/9.seqid 2024-12-16T21:02:46,149 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d/recovered.edits/9.seqid 2024-12-16T21:02:46,149 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/fef0cc17f37d2f685fa3edfbbb56465a 2024-12-16T21:02:46,150 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportExpiredSnapshot/6bd63f0b66fb54e0b600984f1e422b1d 2024-12-16T21:02:46,150 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-16T21:02:46,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,151 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,152 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,153 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-16T21:02:46,155 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-16T21:02:46,156 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,156 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-16T21:02:46,156 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382966156"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:46,156 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382966156"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:46,157 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T21:02:46,157 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => fef0cc17f37d2f685fa3edfbbb56465a, NAME => 'testtb-testExportExpiredSnapshot,,1734382953558.fef0cc17f37d2f685fa3edfbbb56465a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6bd63f0b66fb54e0b600984f1e422b1d, NAME => 'testtb-testExportExpiredSnapshot,1,1734382953558.6bd63f0b66fb54e0b600984f1e422b1d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T21:02:46,158 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
2024-12-16T21:02:46,158 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734382966158"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:46,159 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-16T21:02:46,161 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T21:02:46,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 26 msec 2024-12-16T21:02:46,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-16T21:02:46,248 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 161 completed 2024-12-16T21:02:46,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" 2024-12-16T21:02:46,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-16T21:02:46,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" 2024-12-16T21:02:46,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-16T21:02:46,262 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" 2024-12-16T21:02:46,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-16T21:02:46,287 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=801 (was 808), OpenFileDescriptor=799 (was 821), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=384 (was 453), ProcessCount=11 (was 17), AvailableMemoryMB=4115 (was 3303) - AvailableMemoryMB LEAK? 
- 2024-12-16T21:02:46,287 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-12-16T21:02:46,306 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=801, OpenFileDescriptor=799, MaxFileDescriptor=1048576, SystemLoadAverage=384, ProcessCount=11, AvailableMemoryMB=4115 2024-12-16T21:02:46,306 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-12-16T21:02:46,307 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:02:46,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T21:02:46,309 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:02:46,309 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:46,309 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 162 2024-12-16T21:02:46,310 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:02:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-16T21:02:46,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742186_1362 (size=412) 2024-12-16T21:02:46,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742186_1362 (size=412) 2024-12-16T21:02:46,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742186_1362 (size=412) 2024-12-16T21:02:46,321 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9cbe153285d5b4aa9bf2838f14e7e5fd, NAME => 'testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:46,321 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 57d4207d2930040d72aff92536a70dd3, NAME => 'testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:46,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742187_1363 (size=73) 2024-12-16T21:02:46,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742187_1363 (size=73) 2024-12-16T21:02:46,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742188_1364 (size=73) 2024-12-16T21:02:46,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742188_1364 (size=73) 2024-12-16T21:02:46,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742187_1363 (size=73) 2024-12-16T21:02:46,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742188_1364 (size=73) 2024-12-16T21:02:46,332 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:46,332 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:46,332 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing 57d4207d2930040d72aff92536a70dd3, disabling compactions & flushes 2024-12-16T21:02:46,332 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 9cbe153285d5b4aa9bf2838f14e7e5fd, disabling compactions & flushes 2024-12-16T21:02:46,332 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region 
testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 2024-12-16T21:02:46,333 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 2024-12-16T21:02:46,333 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 2024-12-16T21:02:46,333 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 2024-12-16T21:02:46,333 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. after waiting 0 ms 2024-12-16T21:02:46,333 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. after waiting 0 ms 2024-12-16T21:02:46,333 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 2024-12-16T21:02:46,333 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 2024-12-16T21:02:46,333 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 2024-12-16T21:02:46,333 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 
2024-12-16T21:02:46,333 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for 57d4207d2930040d72aff92536a70dd3: 2024-12-16T21:02:46,333 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9cbe153285d5b4aa9bf2838f14e7e5fd: 2024-12-16T21:02:46,333 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:02:46,334 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734382966333"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382966333"}]},"ts":"1734382966333"} 2024-12-16T21:02:46,334 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734382966333"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382966333"}]},"ts":"1734382966333"} 2024-12-16T21:02:46,336 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T21:02:46,336 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:02:46,336 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382966336"}]},"ts":"1734382966336"} 2024-12-16T21:02:46,337 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-16T21:02:46,341 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:02:46,342 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:02:46,342 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:02:46,342 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:02:46,342 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:02:46,342 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:02:46,343 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:02:46,343 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:02:46,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cbe153285d5b4aa9bf2838f14e7e5fd, ASSIGN}, {pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, 
region=57d4207d2930040d72aff92536a70dd3, ASSIGN}] 2024-12-16T21:02:46,344 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=57d4207d2930040d72aff92536a70dd3, ASSIGN 2024-12-16T21:02:46,344 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cbe153285d5b4aa9bf2838f14e7e5fd, ASSIGN 2024-12-16T21:02:46,344 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=57d4207d2930040d72aff92536a70dd3, ASSIGN; state=OFFLINE, location=6b259cf15929,42323,1734382821139; forceNewPlan=false, retain=false 2024-12-16T21:02:46,344 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cbe153285d5b4aa9bf2838f14e7e5fd, ASSIGN; state=OFFLINE, location=6b259cf15929,41713,1734382821274; forceNewPlan=false, retain=false 2024-12-16T21:02:46,375 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:02:46,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-16T21:02:46,494 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-16T21:02:46,495 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=9cbe153285d5b4aa9bf2838f14e7e5fd, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:46,495 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=57d4207d2930040d72aff92536a70dd3, regionState=OPENING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:46,497 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; OpenRegionProcedure 57d4207d2930040d72aff92536a70dd3, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:02:46,498 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=163, state=RUNNABLE; OpenRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:02:46,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-16T21:02:46,650 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:46,651 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:46,652 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 2024-12-16T21:02:46,652 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => 57d4207d2930040d72aff92536a70dd3, NAME => 'testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T21:02:46,653 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. service=AccessControlService 2024-12-16T21:02:46,653 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:02:46,653 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 
2024-12-16T21:02:46,653 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => 9cbe153285d5b4aa9bf2838f14e7e5fd, NAME => 'testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T21:02:46,653 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:46,653 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:46,653 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:46,653 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:46,653 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. service=AccessControlService 2024-12-16T21:02:46,654 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T21:02:46,654 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:46,654 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:46,654 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:46,654 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:46,655 INFO [StoreOpener-57d4207d2930040d72aff92536a70dd3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:46,655 INFO [StoreOpener-9cbe153285d5b4aa9bf2838f14e7e5fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:46,656 INFO [StoreOpener-57d4207d2930040d72aff92536a70dd3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 57d4207d2930040d72aff92536a70dd3 columnFamilyName cf 2024-12-16T21:02:46,656 DEBUG [StoreOpener-57d4207d2930040d72aff92536a70dd3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:46,656 INFO [StoreOpener-9cbe153285d5b4aa9bf2838f14e7e5fd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
9cbe153285d5b4aa9bf2838f14e7e5fd columnFamilyName cf 2024-12-16T21:02:46,656 DEBUG [StoreOpener-9cbe153285d5b4aa9bf2838f14e7e5fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:46,656 INFO [StoreOpener-57d4207d2930040d72aff92536a70dd3-1 {}] regionserver.HStore(327): Store=57d4207d2930040d72aff92536a70dd3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:46,656 INFO [StoreOpener-9cbe153285d5b4aa9bf2838f14e7e5fd-1 {}] regionserver.HStore(327): Store=9cbe153285d5b4aa9bf2838f14e7e5fd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:46,657 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:46,657 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:46,657 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:46,657 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:46,659 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:46,659 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:46,661 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:46,661 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:46,661 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened 
9cbe153285d5b4aa9bf2838f14e7e5fd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65362329, jitterRate=-0.026025399565696716}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:46,661 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened 57d4207d2930040d72aff92536a70dd3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65592301, jitterRate=-0.022598549723625183}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:46,662 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for 9cbe153285d5b4aa9bf2838f14e7e5fd: 2024-12-16T21:02:46,662 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for 57d4207d2930040d72aff92536a70dd3: 2024-12-16T21:02:46,662 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd., pid=166, masterSystemTime=1734382966651 2024-12-16T21:02:46,662 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3., pid=165, masterSystemTime=1734382966649 2024-12-16T21:02:46,664 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 2024-12-16T21:02:46,664 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 2024-12-16T21:02:46,664 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=57d4207d2930040d72aff92536a70dd3, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:46,664 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 2024-12-16T21:02:46,664 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 
2024-12-16T21:02:46,665 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=9cbe153285d5b4aa9bf2838f14e7e5fd, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:46,667 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-12-16T21:02:46,667 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; OpenRegionProcedure 57d4207d2930040d72aff92536a70dd3, server=6b259cf15929,42323,1734382821139 in 169 msec 2024-12-16T21:02:46,667 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=163 2024-12-16T21:02:46,668 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=57d4207d2930040d72aff92536a70dd3, ASSIGN in 324 msec 2024-12-16T21:02:46,668 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=163, state=SUCCESS; OpenRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd, server=6b259cf15929,41713,1734382821274 in 168 msec 2024-12-16T21:02:46,669 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-12-16T21:02:46,669 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cbe153285d5b4aa9bf2838f14e7e5fd, ASSIGN in 324 msec 2024-12-16T21:02:46,669 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:02:46,669 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382966669"}]},"ts":"1734382966669"} 2024-12-16T21:02:46,670 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-16T21:02:46,672 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:02:46,672 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-16T21:02:46,674 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T21:02:46,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:46,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:46,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:46,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:46,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:46,683 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 374 msec 2024-12-16T21:02:46,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-16T21:02:46,913 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 162 completed 2024-12-16T21:02:46,913 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. 
Timeout = 60000ms 2024-12-16T21:02:46,913 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:46,916 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-16T21:02:46,917 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:46,917 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-16T21:02:46,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T21:02:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382966919 (current time:1734382966919). 2024-12-16T21:02:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:02:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-16T21:02:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:02:46,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x609873a1 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46bf663 2024-12-16T21:02:46,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6705f710, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:46,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:46,926 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:46,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x609873a1 to 127.0.0.1:61742 2024-12-16T21:02:46,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:46,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d083bab to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@268b0287 2024-12-16T21:02:46,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e7ecc31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:46,931 DEBUG [hconnection-0x4289e424-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:46,932 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:46,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:46,934 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60616, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:46,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d083bab to 127.0.0.1:61742 2024-12-16T21:02:46,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:46,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T21:02:46,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:02:46,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T21:02:46,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-16T21:02:46,937 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:02:46,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T21:02:46,938 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:02:46,940 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:02:46,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742189_1365 (size=185) 2024-12-16T21:02:46,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742189_1365 (size=185) 2024-12-16T21:02:46,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742189_1365 (size=185) 2024-12-16T21:02:46,946 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:02:46,946 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd}, {pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 57d4207d2930040d72aff92536a70dd3}] 2024-12-16T21:02:46,947 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:46,947 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:47,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T21:02:47,098 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:47,098 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:47,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=168 2024-12-16T21:02:47,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42323 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=169 2024-12-16T21:02:47,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 2024-12-16T21:02:47,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 
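A few entries back, PermissionStorage writes the table ACL (jenkins: RWXCA) and ZKPermissionWatcher propagates it to every server's permission cache. From a client, an equivalent grant is normally issued through AccessControlClient; the sketch below assumes the AccessController coprocessor is enabled and takes the user and table names from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTableAcl {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant on the whole table (family and qualifier left null) to user "jenkins".
      // RWXCA corresponds to READ, WRITE, EXEC, CREATE and ADMIN.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
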
2024-12-16T21:02:47,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.HRegion(2538): Flush status journal for 57d4207d2930040d72aff92536a70dd3: 2024-12-16T21:02:47,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 9cbe153285d5b4aa9bf2838f14e7e5fd: 2024-12-16T21:02:47,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-16T21:02:47,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-16T21:02:47,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:47,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:02:47,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:47,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:02:47,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742191_1367 (size=76) 2024-12-16T21:02:47,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742191_1367 (size=76) 2024-12-16T21:02:47,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742191_1367 (size=76) 2024-12-16T21:02:47,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742190_1366 (size=76) 2024-12-16T21:02:47,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742190_1366 (size=76) 2024-12-16T21:02:47,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 2024-12-16T21:02:47,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-16T21:02:47,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742190_1366 (size=76) 2024-12-16T21:02:47,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-16T21:02:47,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:47,108 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 
2024-12-16T21:02:47,108 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=169 2024-12-16T21:02:47,108 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:47,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=169 2024-12-16T21:02:47,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:47,108 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:47,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; SnapshotRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd in 163 msec 2024-12-16T21:02:47,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=167 2024-12-16T21:02:47,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=167, state=SUCCESS; SnapshotRegionProcedure 57d4207d2930040d72aff92536a70dd3 in 163 msec 2024-12-16T21:02:47,111 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:02:47,111 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:02:47,112 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:02:47,112 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,112 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742192_1368 (size=567) 2024-12-16T21:02:47,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742192_1368 (size=567) 2024-12-16T21:02:47,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is 
added to blk_1073742192_1368 (size=567) 2024-12-16T21:02:47,124 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:02:47,128 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:02:47,128 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,130 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:02:47,130 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-16T21:02:47,131 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 195 msec 2024-12-16T21:02:47,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T21:02:47,239 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 167 completed 2024-12-16T21:02:47,245 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42323 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:02:47,247 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41713 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:02:47,248 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-16T21:02:47,248 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 
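The empty snapshot above (emptySnaptb0-testEmptyExportFileSystemState, procId 167) and the snaptb0-testEmptyExportFileSystemState request that follows are both FLUSH-type snapshots taken through the master's snapshot RPC. From the client side each corresponds roughly to a single Admin call; a minimal sketch (illustrative, not the test's code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // For an enabled table this takes a flush snapshot, matching the
      // type=FLUSH seen in the SnapshotProcedure entries.
      admin.snapshot("snaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"));
    }
  }
}
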
2024-12-16T21:02:47,248 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:47,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T21:02:47,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382967257 (current time:1734382967257). 2024-12-16T21:02:47,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:02:47,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-16T21:02:47,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:02:47,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1806bba9 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1523d449 2024-12-16T21:02:47,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69ea51f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:47,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:47,264 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:47,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1806bba9 to 127.0.0.1:61742 2024-12-16T21:02:47,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:47,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4b94ff05 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@409efba2 2024-12-16T21:02:47,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bc05782, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:47,272 DEBUG [hconnection-0xb0e353a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:47,273 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:50282, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:47,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:47,274 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60628, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:47,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4b94ff05 to 127.0.0.1:61742 2024-12-16T21:02:47,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:47,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T21:02:47,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:02:47,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=170, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T21:02:47,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-16T21:02:47,277 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:02:47,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-16T21:02:47,278 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:02:47,280 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:02:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742193_1369 (size=180) 2024-12-16T21:02:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742193_1369 (size=180) 2024-12-16T21:02:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is 
added to blk_1073742193_1369 (size=180) 2024-12-16T21:02:47,287 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:02:47,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd}, {pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 57d4207d2930040d72aff92536a70dd3}] 2024-12-16T21:02:47,288 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:47,288 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:47,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-16T21:02:47,439 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:47,439 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:47,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=171 2024-12-16T21:02:47,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42323 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=172 2024-12-16T21:02:47,439 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 2024-12-16T21:02:47,439 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 
2024-12-16T21:02:47,440 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2837): Flushing 9cbe153285d5b4aa9bf2838f14e7e5fd 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-16T21:02:47,440 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 57d4207d2930040d72aff92536a70dd3 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-16T21:02:47,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/.tmp/cf/9c368c1f19ff4e66a02786abc7610611 is 71, key is 1600705eb163212b5a33429848af2dbf/cf:q/1734382967245/Put/seqid=0 2024-12-16T21:02:47,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742194_1370 (size=8326) 2024-12-16T21:02:47,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742194_1370 (size=8326) 2024-12-16T21:02:47,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742194_1370 (size=8326) 2024-12-16T21:02:47,463 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/.tmp/cf/9c368c1f19ff4e66a02786abc7610611 2024-12-16T21:02:47,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/.tmp/cf/2de48222f3fb4163b8d9a3a47ac7b6c3 is 71, key is 0482b205c950a3f99a013e17152ba478/cf:q/1734382967246/Put/seqid=0 2024-12-16T21:02:47,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/.tmp/cf/9c368c1f19ff4e66a02786abc7610611 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/cf/9c368c1f19ff4e66a02786abc7610611 2024-12-16T21:02:47,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742195_1371 (size=5286) 2024-12-16T21:02:47,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742195_1371 (size=5286) 2024-12-16T21:02:47,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742195_1371 (size=5286) 2024-12-16T21:02:47,472 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/.tmp/cf/2de48222f3fb4163b8d9a3a47ac7b6c3 2024-12-16T21:02:47,473 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/cf/9c368c1f19ff4e66a02786abc7610611, entries=47, sequenceid=6, filesize=8.1 K 2024-12-16T21:02:47,474 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 57d4207d2930040d72aff92536a70dd3 in 33ms, sequenceid=6, compaction requested=false 2024-12-16T21:02:47,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-16T21:02:47,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 57d4207d2930040d72aff92536a70dd3: 2024-12-16T21:02:47,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-16T21:02:47,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:47,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/cf/9c368c1f19ff4e66a02786abc7610611] hfiles 2024-12-16T21:02:47,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/cf/9c368c1f19ff4e66a02786abc7610611 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/.tmp/cf/2de48222f3fb4163b8d9a3a47ac7b6c3 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/cf/2de48222f3fb4163b8d9a3a47ac7b6c3 2024-12-16T21:02:47,481 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/cf/2de48222f3fb4163b8d9a3a47ac7b6c3, entries=3, sequenceid=6, filesize=5.2 K 2024-12-16T21:02:47,482 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 9cbe153285d5b4aa9bf2838f14e7e5fd in 42ms, sequenceid=6, compaction requested=false 2024-12-16T21:02:47,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2538): Flush status journal for 9cbe153285d5b4aa9bf2838f14e7e5fd: 2024-12-16T21:02:47,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-16T21:02:47,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:47,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/cf/2de48222f3fb4163b8d9a3a47ac7b6c3] hfiles 2024-12-16T21:02:47,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/cf/2de48222f3fb4163b8d9a3a47ac7b6c3 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742196_1372 (size=115) 2024-12-16T21:02:47,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742196_1372 (size=115) 2024-12-16T21:02:47,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742196_1372 (size=115) 2024-12-16T21:02:47,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 
2024-12-16T21:02:47,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-16T21:02:47,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-16T21:02:47,485 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:47,485 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:47,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; SnapshotRegionProcedure 57d4207d2930040d72aff92536a70dd3 in 199 msec 2024-12-16T21:02:47,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742197_1373 (size=115) 2024-12-16T21:02:47,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742197_1373 (size=115) 2024-12-16T21:02:47,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742197_1373 (size=115) 2024-12-16T21:02:47,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 
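Once the procedure reaches SNAPSHOT_COMPLETE_SNAPSHOT (a few entries below), both snapshots are visible to clients. A small verification sketch, assuming the 2.x Admin API (getTableName() on the client-side SnapshotDescription is assumed here):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Expected here: emptySnaptb0-... and snaptb0-testEmptyExportFileSystemState.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " -> " + sd.getTableName());
      }
    }
  }
}
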
2024-12-16T21:02:47,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=171 2024-12-16T21:02:47,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=171 2024-12-16T21:02:47,492 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:47,492 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:47,494 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-12-16T21:02:47,494 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; SnapshotRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd in 206 msec 2024-12-16T21:02:47,494 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:02:47,495 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:02:47,496 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:02:47,496 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,496 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742198_1374 (size=645) 2024-12-16T21:02:47,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742198_1374 (size=645) 2024-12-16T21:02:47,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742198_1374 (size=645) 2024-12-16T21:02:47,509 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 
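The entries that follow show the test exporting the emptySnaptb0-testEmptyExportFileSystemState snapshot to a separate HDFS directory (copying the snapshot manifest, then resolving the dependency jars for the MapReduce copy job). Outside the test harness this is normally done with the ExportSnapshot tool, either from the shell as hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <hdfs-uri>, or programmatically. A rough sketch of the programmatic form (the destination URI is a placeholder, and the flag spelling follows the documented CLI usage):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ExportSnapshot is a Hadoop Tool; ToolRunner passes the remaining
    // arguments to it after handling generic Hadoop options.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://namenode:8020/export-test/export-dest"  // placeholder path
    });
    System.exit(rc);
  }
}
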
2024-12-16T21:02:47,513 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:02:47,513 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,514 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:02:47,514 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-16T21:02:47,515 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 238 msec 2024-12-16T21:02:47,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-16T21:02:47,579 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 170 completed 2024-12-16T21:02:47,580 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382967580 2024-12-16T21:02:47,580 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37263, tgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382967580, rawTgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382967580, srcFsUri=hdfs://localhost:37263, srcDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:47,608 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37263, inputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:47,608 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382967580, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382967580/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,610 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T21:02:47,614 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382967580/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:47,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742199_1375 (size=185) 2024-12-16T21:02:47,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742199_1375 (size=185) 2024-12-16T21:02:47,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742199_1375 (size=185) 2024-12-16T21:02:47,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742200_1376 (size=567) 2024-12-16T21:02:47,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742200_1376 (size=567) 2024-12-16T21:02:47,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742200_1376 (size=567) 2024-12-16T21:02:47,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:47,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:47,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:47,635 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:48,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-6975234165724035522.jar 2024-12-16T21:02:48,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:48,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:48,796 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-5754867519021447384.jar 2024-12-16T21:02:48,796 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:48,796 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:48,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:48,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:48,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:48,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T21:02:48,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T21:02:48,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T21:02:48,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T21:02:48,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T21:02:48,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T21:02:48,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T21:02:48,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T21:02:48,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T21:02:48,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T21:02:48,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T21:02:48,800 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T21:02:48,800 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T21:02:48,800 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:02:48,801 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:02:48,801 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:02:48,801 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:02:48,802 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:02:48,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:02:48,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:02:48,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742201_1377 (size=451756) 2024-12-16T21:02:48,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742201_1377 (size=451756) 2024-12-16T21:02:48,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742201_1377 (size=451756) 2024-12-16T21:02:48,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742202_1378 (size=127628) 2024-12-16T21:02:48,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742202_1378 (size=127628) 2024-12-16T21:02:48,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742202_1378 (size=127628) 2024-12-16T21:02:48,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742203_1379 (size=2172137) 2024-12-16T21:02:48,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742203_1379 (size=2172137) 2024-12-16T21:02:48,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742203_1379 (size=2172137) 2024-12-16T21:02:48,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742204_1380 (size=213228) 2024-12-16T21:02:48,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742204_1380 (size=213228) 2024-12-16T21:02:48,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742204_1380 (size=213228) 2024-12-16T21:02:48,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742205_1381 (size=1877034) 2024-12-16T21:02:48,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742205_1381 (size=1877034) 2024-12-16T21:02:48,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to 
blk_1073742205_1381 (size=1877034) 2024-12-16T21:02:48,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742206_1382 (size=533455) 2024-12-16T21:02:48,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742206_1382 (size=533455) 2024-12-16T21:02:48,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742206_1382 (size=533455) 2024-12-16T21:02:48,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742207_1383 (size=7280644) 2024-12-16T21:02:48,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742207_1383 (size=7280644) 2024-12-16T21:02:48,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742207_1383 (size=7280644) 2024-12-16T21:02:48,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742208_1384 (size=4188619) 2024-12-16T21:02:48,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742208_1384 (size=4188619) 2024-12-16T21:02:48,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742208_1384 (size=4188619) 2024-12-16T21:02:48,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742209_1385 (size=20406) 2024-12-16T21:02:48,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742209_1385 (size=20406) 2024-12-16T21:02:48,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742209_1385 (size=20406) 2024-12-16T21:02:48,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742210_1386 (size=75495) 2024-12-16T21:02:48,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742210_1386 (size=75495) 2024-12-16T21:02:48,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742210_1386 (size=75495) 2024-12-16T21:02:49,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742211_1387 (size=45609) 2024-12-16T21:02:49,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742211_1387 (size=45609) 2024-12-16T21:02:49,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742211_1387 (size=45609) 2024-12-16T21:02:49,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742212_1388 (size=110084) 2024-12-16T21:02:49,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added 
to blk_1073742212_1388 (size=110084) 2024-12-16T21:02:49,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742212_1388 (size=110084) 2024-12-16T21:02:49,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742213_1389 (size=1323991) 2024-12-16T21:02:49,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742213_1389 (size=1323991) 2024-12-16T21:02:49,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742213_1389 (size=1323991) 2024-12-16T21:02:49,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742214_1390 (size=23076) 2024-12-16T21:02:49,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742214_1390 (size=23076) 2024-12-16T21:02:49,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742214_1390 (size=23076) 2024-12-16T21:02:49,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742215_1391 (size=126803) 2024-12-16T21:02:49,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742215_1391 (size=126803) 2024-12-16T21:02:49,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742215_1391 (size=126803) 2024-12-16T21:02:49,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742216_1392 (size=322274) 2024-12-16T21:02:49,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742216_1392 (size=322274) 2024-12-16T21:02:49,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742216_1392 (size=322274) 2024-12-16T21:02:49,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742217_1393 (size=1832290) 2024-12-16T21:02:49,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742217_1393 (size=1832290) 2024-12-16T21:02:49,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742217_1393 (size=1832290) 2024-12-16T21:02:49,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742218_1394 (size=30081) 2024-12-16T21:02:49,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742218_1394 (size=30081) 2024-12-16T21:02:49,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742218_1394 (size=30081) 2024-12-16T21:02:49,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is 
added to blk_1073742219_1395 (size=53616) 2024-12-16T21:02:49,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742219_1395 (size=53616) 2024-12-16T21:02:49,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742219_1395 (size=53616) 2024-12-16T21:02:49,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742220_1396 (size=29229) 2024-12-16T21:02:49,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742220_1396 (size=29229) 2024-12-16T21:02:49,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742220_1396 (size=29229) 2024-12-16T21:02:49,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742221_1397 (size=169089) 2024-12-16T21:02:49,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742221_1397 (size=169089) 2024-12-16T21:02:49,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742221_1397 (size=169089) 2024-12-16T21:02:49,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742222_1398 (size=5175431) 2024-12-16T21:02:49,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742222_1398 (size=5175431) 2024-12-16T21:02:49,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742222_1398 (size=5175431) 2024-12-16T21:02:49,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742223_1399 (size=136454) 2024-12-16T21:02:49,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742223_1399 (size=136454) 2024-12-16T21:02:49,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742223_1399 (size=136454) 2024-12-16T21:02:49,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742224_1400 (size=6350920) 2024-12-16T21:02:49,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742224_1400 (size=6350920) 2024-12-16T21:02:49,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742224_1400 (size=6350920) 2024-12-16T21:02:49,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742225_1401 (size=907465) 2024-12-16T21:02:49,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742225_1401 (size=907465) 2024-12-16T21:02:49,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33147 is added to blk_1073742225_1401 (size=907465) 2024-12-16T21:02:49,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742226_1402 (size=3317408) 2024-12-16T21:02:49,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742226_1402 (size=3317408) 2024-12-16T21:02:49,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742226_1402 (size=3317408) 2024-12-16T21:02:49,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742227_1403 (size=503880) 2024-12-16T21:02:49,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742227_1403 (size=503880) 2024-12-16T21:02:49,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742227_1403 (size=503880) 2024-12-16T21:02:49,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742228_1404 (size=4695811) 2024-12-16T21:02:49,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742228_1404 (size=4695811) 2024-12-16T21:02:49,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742228_1404 (size=4695811) 2024-12-16T21:02:49,215 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
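The JobResourceUploader warning above appears because the test submits the MapReduce job without a packaged job jar, so task JVMs rely on the classpath assembled from the individual jars listed earlier. In a standalone driver the warning is normally avoided by setting the job jar; a minimal sketch, assuming the standard org.apache.hadoop.mapreduce.Job API (the class name JobJarSketch and the jar path are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSketch {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "export-snapshot-sketch");
    // Resolves the jar containing this class and ships it with the job,
    // so user classes can be found on the task side.
    job.setJarByClass(JobJarSketch.class);
    // Alternatively, point at an explicit jar:
    // job.setJar("/path/to/application.jar");
  }
}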
2024-12-16T21:02:49,217 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-16T21:02:49,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742229_1405 (size=7) 2024-12-16T21:02:49,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742229_1405 (size=7) 2024-12-16T21:02:49,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742229_1405 (size=7) 2024-12-16T21:02:49,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742230_1406 (size=10) 2024-12-16T21:02:49,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742230_1406 (size=10) 2024-12-16T21:02:49,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742230_1406 (size=10) 2024-12-16T21:02:49,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742231_1407 (size=304786) 2024-12-16T21:02:49,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742231_1407 (size=304786) 2024-12-16T21:02:49,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742231_1407 (size=304786) 2024-12-16T21:02:49,262 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:02:49,263 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:02:49,283 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
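The export being driven here by the test (the inputRoot/outputRoot setup and the "Loading Snapshot ... hfile list" step above) uses the same code path as the ExportSnapshot tool. A minimal sketch of invoking it programmatically, assuming ExportSnapshot runs as a Hadoop Tool with its usual -snapshot and -copy-to options; the class name ExportSnapshotSketch and the -mappers value are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Command-line equivalent:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot emptySnaptb0-testEmptyExportFileSystemState -copy-to <target dir> -mappers 1
    int exit = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382967580",
        "-mappers", "1"
    });
    System.exit(exit);
  }
}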
2024-12-16T21:02:49,554 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0007_000001 (auth:SIMPLE) from 127.0.0.1:54796 2024-12-16T21:02:50,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-16T21:02:50,872 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-16T21:02:50,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-16T21:02:51,499 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:02:55,326 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0007_000001 (auth:SIMPLE) from 127.0.0.1:41234 2024-12-16T21:02:55,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742232_1408 (size=350436) 2024-12-16T21:02:55,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742232_1408 (size=350436) 2024-12-16T21:02:55,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742232_1408 (size=350436) 2024-12-16T21:02:56,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742233_1409 (size=8568) 2024-12-16T21:02:56,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742233_1409 (size=8568) 2024-12-16T21:02:56,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742233_1409 (size=8568) 2024-12-16T21:02:56,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742234_1410 (size=460) 2024-12-16T21:02:56,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742234_1410 (size=460) 2024-12-16T21:02:56,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742234_1410 (size=460) 2024-12-16T21:02:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742235_1411 (size=8568) 2024-12-16T21:02:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742235_1411 (size=8568) 2024-12-16T21:02:56,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742235_1411 (size=8568) 2024-12-16T21:02:56,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742236_1412 (size=350436) 
2024-12-16T21:02:56,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742236_1412 (size=350436) 2024-12-16T21:02:56,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742236_1412 (size=350436) 2024-12-16T21:02:58,360 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T21:02:58,360 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T21:02:58,365 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:58,365 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T21:02:58,365 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T21:02:58,365 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:58,366 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-16T21:02:58,366 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-16T21:02:58,366 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382967580/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382967580/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:58,366 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382967580/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-16T21:02:58,366 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734382967580/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-16T21:02:58,371 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, 
state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-16T21:02:58,374 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382978373"}]},"ts":"1734382978373"} 2024-12-16T21:02:58,375 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-16T21:02:58,377 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-16T21:02:58,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-16T21:02:58,379 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cbe153285d5b4aa9bf2838f14e7e5fd, UNASSIGN}, {pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=57d4207d2930040d72aff92536a70dd3, UNASSIGN}] 2024-12-16T21:02:58,379 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=57d4207d2930040d72aff92536a70dd3, UNASSIGN 2024-12-16T21:02:58,380 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cbe153285d5b4aa9bf2838f14e7e5fd, UNASSIGN 2024-12-16T21:02:58,380 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=57d4207d2930040d72aff92536a70dd3, regionState=CLOSING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:58,380 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=9cbe153285d5b4aa9bf2838f14e7e5fd, regionState=CLOSING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:02:58,381 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:02:58,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=175, state=RUNNABLE; CloseRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:02:58,382 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:02:58,382 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=176, state=RUNNABLE; CloseRegionProcedure 57d4207d2930040d72aff92536a70dd3, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:02:58,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 
2024-12-16T21:02:58,533 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:02:58,533 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:58,533 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:58,533 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(124): Close 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:58,533 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:02:58,533 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:02:58,533 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing 57d4207d2930040d72aff92536a70dd3, disabling compactions & flushes 2024-12-16T21:02:58,533 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1681): Closing 9cbe153285d5b4aa9bf2838f14e7e5fd, disabling compactions & flushes 2024-12-16T21:02:58,533 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 2024-12-16T21:02:58,533 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 2024-12-16T21:02:58,533 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 2024-12-16T21:02:58,533 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 2024-12-16T21:02:58,533 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. after waiting 0 ms 2024-12-16T21:02:58,533 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. after waiting 0 ms 2024-12-16T21:02:58,534 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 
2024-12-16T21:02:58,534 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 2024-12-16T21:02:58,537 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:02:58,537 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:02:58,538 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:02:58,538 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd. 2024-12-16T21:02:58,538 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:02:58,538 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1635): Region close journal for 9cbe153285d5b4aa9bf2838f14e7e5fd: 2024-12-16T21:02:58,538 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3. 
2024-12-16T21:02:58,538 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for 57d4207d2930040d72aff92536a70dd3: 2024-12-16T21:02:58,539 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(170): Closed 9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:58,539 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=9cbe153285d5b4aa9bf2838f14e7e5fd, regionState=CLOSED 2024-12-16T21:02:58,539 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed 57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:58,540 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=57d4207d2930040d72aff92536a70dd3, regionState=CLOSED 2024-12-16T21:02:58,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=175 2024-12-16T21:02:58,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=176 2024-12-16T21:02:58,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=176, state=SUCCESS; CloseRegionProcedure 57d4207d2930040d72aff92536a70dd3, server=6b259cf15929,42323,1734382821139 in 159 msec 2024-12-16T21:02:58,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=175, state=SUCCESS; CloseRegionProcedure 9cbe153285d5b4aa9bf2838f14e7e5fd, server=6b259cf15929,41713,1734382821274 in 159 msec 2024-12-16T21:02:58,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cbe153285d5b4aa9bf2838f14e7e5fd, UNASSIGN in 163 msec 2024-12-16T21:02:58,543 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=174 2024-12-16T21:02:58,543 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=57d4207d2930040d72aff92536a70dd3, UNASSIGN in 163 msec 2024-12-16T21:02:58,545 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-16T21:02:58,545 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 166 msec 2024-12-16T21:02:58,546 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382978546"}]},"ts":"1734382978546"} 2024-12-16T21:02:58,547 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-16T21:02:58,548 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-16T21:02:58,549 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 178 msec 2024-12-16T21:02:58,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=173 2024-12-16T21:02:58,675 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 173 completed 2024-12-16T21:02:58,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,677 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,678 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,679 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,681 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:58,681 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:58,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,683 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/cf, FileablePath, 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/recovered.edits] 2024-12-16T21:02:58,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-16T21:02:58,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-16T21:02:58,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-16T21:02:58,683 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/recovered.edits] 2024-12-16T21:02:58,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:58,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:58,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:58,684 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-12-16T21:02:58,685 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-16T21:02:58,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:58,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=179 2024-12-16T21:02:58,685 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:58,686 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:58,686 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:58,686 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:58,688 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/cf/2de48222f3fb4163b8d9a3a47ac7b6c3 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/cf/2de48222f3fb4163b8d9a3a47ac7b6c3 2024-12-16T21:02:58,689 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/cf/9c368c1f19ff4e66a02786abc7610611 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/cf/9c368c1f19ff4e66a02786abc7610611 2024-12-16T21:02:58,691 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd/recovered.edits/9.seqid 2024-12-16T21:02:58,691 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3/recovered.edits/9.seqid 2024-12-16T21:02:58,692 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/9cbe153285d5b4aa9bf2838f14e7e5fd 2024-12-16T21:02:58,692 DEBUG [HFileArchiver-9 {}] 
backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testEmptyExportFileSystemState/57d4207d2930040d72aff92536a70dd3 2024-12-16T21:02:58,692 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-16T21:02:58,694 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,696 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-16T21:02:58,698 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-16T21:02:58,699 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,699 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-16T21:02:58,699 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382978699"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:58,699 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734382978699"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:58,701 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T21:02:58,701 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9cbe153285d5b4aa9bf2838f14e7e5fd, NAME => 'testtb-testEmptyExportFileSystemState,,1734382966307.9cbe153285d5b4aa9bf2838f14e7e5fd.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 57d4207d2930040d72aff92536a70dd3, NAME => 'testtb-testEmptyExportFileSystemState,1,1734382966307.57d4207d2930040d72aff92536a70dd3.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T21:02:58,701 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
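The entries above and immediately below trace the teardown of testtb-testEmptyExportFileSystemState: DeleteTableProcedure pid=179 archives the region HFiles, removes the region rows from hbase:meta, and the client then drops the two snapshots. A minimal client-side sketch of that cleanup, assuming an already-open Connection to this mini-cluster (the class and method names are illustrative, not taken from the test source), might look like:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class DropEmptyExportTable {
  // Disable and delete the test table (the master runs this as DeleteTableProcedure,
  // archiving HFiles and removing the region rows from hbase:meta, as logged above),
  // then drop both snapshots, matching the delete requests logged just below.
  static void cleanup(Connection connection) throws Exception {
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Admin admin = connection.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      admin.deleteTable(table);   // DeleteTableProcedure (pid=179 in this run)
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}
```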
2024-12-16T21:02:58,701 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734382978701"}]},"ts":"9223372036854775807"} 2024-12-16T21:02:58,702 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-16T21:02:58,704 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T21:02:58,706 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 28 msec 2024-12-16T21:02:58,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-16T21:02:58,787 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 179 completed 2024-12-16T21:02:58,793 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" 2024-12-16T21:02:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:58,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" 2024-12-16T21:02:58,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-16T21:02:58,821 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=813 (was 801) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:46387 from appattempt_1734382828217_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-40 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:47300 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1665769826_1 at /127.0.0.1:47284 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:35664 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-41 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-42 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1665769826_1 
at /127.0.0.1:35618 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:40141 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:39464 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5681 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) 
java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 7006) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-39 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=823 (was 799) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=449 (was 384) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=3282 (was 4115) 2024-12-16T21:02:58,821 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-12-16T21:02:58,841 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=813, OpenFileDescriptor=823, MaxFileDescriptor=1048576, SystemLoadAverage=449, ProcessCount=17, AvailableMemoryMB=3281 2024-12-16T21:02:58,841 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-12-16T21:02:58,843 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:02:58,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-16T21:02:58,844 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:02:58,845 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:58,845 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 
{}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 180 2024-12-16T21:02:58,845 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:02:58,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-16T21:02:58,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742237_1413 (size=404) 2024-12-16T21:02:58,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742237_1413 (size=404) 2024-12-16T21:02:58,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742237_1413 (size=404) 2024-12-16T21:02:58,854 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => bd59e4492efad7b56f6a95b7ff86005c, NAME => 'testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:58,854 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 49b5609b2a32ef56940986b9d60dd932, NAME => 'testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:02:58,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742238_1414 (size=65) 2024-12-16T21:02:58,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742238_1414 (size=65) 2024-12-16T21:02:58,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742238_1414 (size=65) 2024-12-16T21:02:58,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742239_1415 (size=65) 
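The CreateTableProcedure entries above print the full descriptor for testtb-testExportWithChecksum (a single 'cf' family with VERSIONS=1, the remaining attributes at their defaults) and RegionOpenAndInit initializes two regions split at row key '1'. A rough equivalent of the client call that produces this layout, sketched with the HBase 2.x builder API (the wrapper names are assumptions):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTable {
  // Create a table with one 'cf' family keeping a single version, pre-split at "1"
  // so it starts with two regions (''..'1' and '1'..''), mirroring the regions
  // created by RegionOpenAndInit above.
  static void create(Admin admin) throws Exception {
    admin.createTable(
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .build())
            .build(),
        new byte[][] { Bytes.toBytes("1") });  // single split key => two initial regions
  }
}
```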
2024-12-16T21:02:58,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742239_1415 (size=65) 2024-12-16T21:02:58,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742239_1415 (size=65) 2024-12-16T21:02:58,865 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:58,865 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1681): Closing bd59e4492efad7b56f6a95b7ff86005c, disabling compactions & flushes 2024-12-16T21:02:58,865 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:02:58,865 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:02:58,865 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. after waiting 0 ms 2024-12-16T21:02:58,865 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:02:58,865 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:02:58,865 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1635): Region close journal for bd59e4492efad7b56f6a95b7ff86005c: 2024-12-16T21:02:58,865 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:58,866 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1681): Closing 49b5609b2a32ef56940986b9d60dd932, disabling compactions & flushes 2024-12-16T21:02:58,866 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 2024-12-16T21:02:58,866 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 2024-12-16T21:02:58,866 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 
after waiting 0 ms 2024-12-16T21:02:58,866 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 2024-12-16T21:02:58,866 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 2024-12-16T21:02:58,866 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1635): Region close journal for 49b5609b2a32ef56940986b9d60dd932: 2024-12-16T21:02:58,867 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:02:58,867 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734382978867"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382978867"}]},"ts":"1734382978867"} 2024-12-16T21:02:58,867 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734382978867"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734382978867"}]},"ts":"1734382978867"} 2024-12-16T21:02:58,869 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T21:02:58,870 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:02:58,870 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382978870"}]},"ts":"1734382978870"} 2024-12-16T21:02:58,871 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-16T21:02:58,874 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:02:58,875 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:02:58,875 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:02:58,875 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:02:58,875 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:02:58,875 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:02:58,875 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:02:58,875 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:02:58,876 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=49b5609b2a32ef56940986b9d60dd932, 
ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=bd59e4492efad7b56f6a95b7ff86005c, ASSIGN}] 2024-12-16T21:02:58,876 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=49b5609b2a32ef56940986b9d60dd932, ASSIGN 2024-12-16T21:02:58,876 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=bd59e4492efad7b56f6a95b7ff86005c, ASSIGN 2024-12-16T21:02:58,877 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=bd59e4492efad7b56f6a95b7ff86005c, ASSIGN; state=OFFLINE, location=6b259cf15929,36861,1734382821216; forceNewPlan=false, retain=false 2024-12-16T21:02:58,877 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=49b5609b2a32ef56940986b9d60dd932, ASSIGN; state=OFFLINE, location=6b259cf15929,42323,1734382821139; forceNewPlan=false, retain=false 2024-12-16T21:02:58,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-16T21:02:59,028 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T21:02:59,028 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=bd59e4492efad7b56f6a95b7ff86005c, regionState=OPENING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:02:59,028 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=49b5609b2a32ef56940986b9d60dd932, regionState=OPENING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:59,029 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE; OpenRegionProcedure bd59e4492efad7b56f6a95b7ff86005c, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:02:59,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE; OpenRegionProcedure 49b5609b2a32ef56940986b9d60dd932, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:02:59,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-16T21:02:59,181 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:02:59,181 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:59,184 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 
2024-12-16T21:02:59,184 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:02:59,184 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7285): Opening region: {ENCODED => 49b5609b2a32ef56940986b9d60dd932, NAME => 'testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T21:02:59,184 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7285): Opening region: {ENCODED => bd59e4492efad7b56f6a95b7ff86005c, NAME => 'testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T21:02:59,184 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. service=AccessControlService 2024-12-16T21:02:59,184 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. service=AccessControlService 2024-12-16T21:02:59,184 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:02:59,184 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
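Assignment of the two new regions runs as one TransitRegionStateProcedure per region, each spawning an OpenRegionProcedure on its target region server; the "Waiting until all regions ... get assigned" entries further down are the test blocking until that completes. A sketch of that wait, assuming 'util' refers to the HBaseTestingUtility instance driving this mini-cluster:

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;

public class WaitForChecksumTable {
  // Block until hbase:meta and the AssignmentManager report every region of the
  // table as open, with the same 60 s timeout shown in the log.
  static void await(HBaseTestingUtility util) throws Exception {
    util.waitUntilAllRegionsAssigned(TableName.valueOf("testtb-testExportWithChecksum"), 60000);
  }
}
```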
2024-12-16T21:02:59,185 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:02:59,185 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:02:59,185 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:59,185 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:02:59,185 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7327): checking encryption for 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:02:59,185 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7327): checking encryption for bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:02:59,185 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7330): checking classloading for 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:02:59,185 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7330): checking classloading for bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:02:59,186 INFO [StoreOpener-49b5609b2a32ef56940986b9d60dd932-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:02:59,186 INFO [StoreOpener-bd59e4492efad7b56f6a95b7ff86005c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:02:59,187 INFO [StoreOpener-bd59e4492efad7b56f6a95b7ff86005c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
bd59e4492efad7b56f6a95b7ff86005c columnFamilyName cf 2024-12-16T21:02:59,187 INFO [StoreOpener-49b5609b2a32ef56940986b9d60dd932-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 49b5609b2a32ef56940986b9d60dd932 columnFamilyName cf 2024-12-16T21:02:59,187 DEBUG [StoreOpener-bd59e4492efad7b56f6a95b7ff86005c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:59,187 DEBUG [StoreOpener-49b5609b2a32ef56940986b9d60dd932-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:02:59,188 INFO [StoreOpener-bd59e4492efad7b56f6a95b7ff86005c-1 {}] regionserver.HStore(327): Store=bd59e4492efad7b56f6a95b7ff86005c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:59,188 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:02:59,189 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:02:59,189 INFO [StoreOpener-49b5609b2a32ef56940986b9d60dd932-1 {}] regionserver.HStore(327): Store=49b5609b2a32ef56940986b9d60dd932/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:02:59,189 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:02:59,189 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:02:59,190 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1085): writing seq id for bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:02:59,191 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 
{event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1085): writing seq id for 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:02:59,192 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:59,193 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1102): Opened bd59e4492efad7b56f6a95b7ff86005c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67869587, jitterRate=0.01133565604686737}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:59,193 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:02:59,193 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1001): Region open journal for bd59e4492efad7b56f6a95b7ff86005c: 2024-12-16T21:02:59,193 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1102): Opened 49b5609b2a32ef56940986b9d60dd932; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60744798, jitterRate=-0.09483197331428528}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:02:59,193 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1001): Region open journal for 49b5609b2a32ef56940986b9d60dd932: 2024-12-16T21:02:59,194 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c., pid=183, masterSystemTime=1734382979181 2024-12-16T21:02:59,194 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932., pid=184, masterSystemTime=1734382979181 2024-12-16T21:02:59,195 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:02:59,195 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 
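The PermissionStorage entries just below record the AccessController coprocessor writing an owner grant (jenkins: RWXCA) for the new table during the procedure's post-operation step, with ZKPermissionWatcher fanning it out to each region server's cache. That grant happens automatically here; an explicit client-side grant with the same effect would look roughly like this sketch (class and method names are assumptions):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePerms {
  // Grant the full RWXCA set on the table to user "jenkins"; a null family and
  // qualifier scope the grant to the whole table, matching the ACL entry below.
  static void grant(Connection connection) throws Throwable {
    AccessControlClient.grant(connection,
        TableName.valueOf("testtb-testExportWithChecksum"),
        "jenkins", null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}
```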
2024-12-16T21:02:59,195 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=bd59e4492efad7b56f6a95b7ff86005c, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:02:59,195 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 2024-12-16T21:02:59,196 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 2024-12-16T21:02:59,196 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=49b5609b2a32ef56940986b9d60dd932, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:02:59,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182 2024-12-16T21:02:59,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; OpenRegionProcedure bd59e4492efad7b56f6a95b7ff86005c, server=6b259cf15929,36861,1734382821216 in 168 msec 2024-12-16T21:02:59,198 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=181 2024-12-16T21:02:59,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=181, state=SUCCESS; OpenRegionProcedure 49b5609b2a32ef56940986b9d60dd932, server=6b259cf15929,42323,1734382821139 in 167 msec 2024-12-16T21:02:59,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=bd59e4492efad7b56f6a95b7ff86005c, ASSIGN in 322 msec 2024-12-16T21:02:59,200 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-12-16T21:02:59,200 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=49b5609b2a32ef56940986b9d60dd932, ASSIGN in 323 msec 2024-12-16T21:02:59,201 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:02:59,201 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734382979201"}]},"ts":"1734382979201"} 2024-12-16T21:02:59,202 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-16T21:02:59,204 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:02:59,204 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-16T21:02:59,206 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], 
kv [jenkins: RWXCA] 2024-12-16T21:02:59,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:59,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:59,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:59,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:02:59,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:59,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:59,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:59,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:59,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:59,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:59,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:59,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-16T21:02:59,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithChecksum in 367 msec 2024-12-16T21:02:59,448 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-16T21:02:59,448 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum, procId: 180 completed 2024-12-16T21:02:59,448 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-12-16T21:02:59,448 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:59,452 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-16T21:02:59,452 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:59,452 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithChecksum assigned. 2024-12-16T21:02:59,455 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-16T21:02:59,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382979455 (current time:1734382979455). 2024-12-16T21:02:59,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:02:59,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-16T21:02:59,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:02:59,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x217c88da to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2a17f44a 2024-12-16T21:02:59,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f846b7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:59,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:59,462 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45766, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:59,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x217c88da to 127.0.0.1:61742 2024-12-16T21:02:59,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:59,463 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d143bf6 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@48b421ba 2024-12-16T21:02:59,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34174464, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:59,468 DEBUG [hconnection-0x766df88d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:59,468 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45776, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:59,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:59,470 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46424, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:59,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d143bf6 to 127.0.0.1:61742 2024-12-16T21:02:59,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:59,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-16T21:02:59,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-16T21:02:59,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-16T21:02:59,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-16T21:02:59,473 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:02:59,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-16T21:02:59,474 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:02:59,476 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:02:59,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742240_1416 (size=161) 2024-12-16T21:02:59,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742240_1416 (size=161) 2024-12-16T21:02:59,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742240_1416 (size=161) 2024-12-16T21:02:59,483 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:02:59,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 49b5609b2a32ef56940986b9d60dd932}, {pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure bd59e4492efad7b56f6a95b7ff86005c}] 2024-12-16T21:02:59,483 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:02:59,483 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:02:59,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=185 2024-12-16T21:02:59,634 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:59,634 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:02:59,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42323 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-16T21:02:59,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-16T21:02:59,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 2024-12-16T21:02:59,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:02:59,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for 49b5609b2a32ef56940986b9d60dd932: 2024-12-16T21:02:59,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2538): Flush status journal for bd59e4492efad7b56f6a95b7ff86005c: 2024-12-16T21:02:59,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. for emptySnaptb0-testExportWithChecksum completed. 2024-12-16T21:02:59,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. for emptySnaptb0-testExportWithChecksum completed. 2024-12-16T21:02:59,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-16T21:02:59,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:59,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-16T21:02:59,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:02:59,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:02:59,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:02:59,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742241_1417 (size=68) 2024-12-16T21:02:59,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742241_1417 (size=68) 2024-12-16T21:02:59,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742241_1417 (size=68) 2024-12-16T21:02:59,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:02:59,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-16T21:02:59,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=187 2024-12-16T21:02:59,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:02:59,647 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:02:59,649 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, ppid=185, state=SUCCESS; SnapshotRegionProcedure bd59e4492efad7b56f6a95b7ff86005c in 165 msec 2024-12-16T21:02:59,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742242_1418 (size=68) 2024-12-16T21:02:59,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742242_1418 (size=68) 2024-12-16T21:02:59,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742242_1418 (size=68) 2024-12-16T21:02:59,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 
2024-12-16T21:02:59,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-16T21:02:59,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-16T21:02:59,652 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:02:59,653 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:02:59,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-12-16T21:02:59,655 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; SnapshotRegionProcedure 49b5609b2a32ef56940986b9d60dd932 in 170 msec 2024-12-16T21:02:59,655 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:02:59,655 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:02:59,656 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:02:59,656 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-16T21:02:59,656 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-16T21:02:59,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742243_1419 (size=543) 2024-12-16T21:02:59,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742243_1419 (size=543) 2024-12-16T21:02:59,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742243_1419 (size=543) 2024-12-16T21:02:59,667 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:02:59,670 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:02:59,671 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-16T21:02:59,672 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:02:59,672 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-16T21:02:59,673 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 200 msec 2024-12-16T21:02:59,688 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-16T21:02:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-16T21:02:59,775 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 185 completed 2024-12-16T21:02:59,781 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42323 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:02:59,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:02:59,785 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithChecksum 2024-12-16T21:02:59,785 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 
2024-12-16T21:02:59,785 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:02:59,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-16T21:02:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734382979795 (current time:1734382979795). 2024-12-16T21:02:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:02:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-16T21:02:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:02:59,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6db0a35c to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@748097f7 2024-12-16T21:02:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b34ac14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:59,802 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6db0a35c to 127.0.0.1:61742 2024-12-16T21:02:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x77b127a9 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@77e3987 2024-12-16T21:02:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9a47170, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:02:59,808 DEBUG [hconnection-0x8bcf491-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:59,809 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45790, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:59,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:02:59,811 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:02:59,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77b127a9 to 127.0.0.1:61742 2024-12-16T21:02:59,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:02:59,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-16T21:02:59,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:02:59,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-16T21:02:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-16T21:02:59,814 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:02:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-16T21:02:59,815 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:02:59,817 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:02:59,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742244_1420 (size=156) 2024-12-16T21:02:59,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742244_1420 (size=156) 2024-12-16T21:02:59,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742244_1420 (size=156) 2024-12-16T21:02:59,824 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:02:59,824 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 49b5609b2a32ef56940986b9d60dd932}, {pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure bd59e4492efad7b56f6a95b7ff86005c}] 2024-12-16T21:02:59,825 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:02:59,825 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:02:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-16T21:02:59,975 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:02:59,975 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:02:59,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190 2024-12-16T21:02:59,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42323 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189 2024-12-16T21:02:59,976 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:02:59,976 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 
2024-12-16T21:02:59,976 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2837): Flushing 49b5609b2a32ef56940986b9d60dd932 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-16T21:02:59,976 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing bd59e4492efad7b56f6a95b7ff86005c 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-16T21:02:59,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/.tmp/cf/2652152be32845a1acc80780a7365aed is 69, key is 0faf3d3f5d740913b01b45e6ac2579eba/cf:q/1734382979781/Put/seqid=0 2024-12-16T21:02:59,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/.tmp/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac is 71, key is 153bbe99fb93d8ac7a36aa097908a0e6/cf:q/1734382979782/Put/seqid=0 2024-12-16T21:02:59,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742246_1422 (size=5149) 2024-12-16T21:02:59,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742246_1422 (size=5149) 2024-12-16T21:02:59,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742246_1422 (size=5149) 2024-12-16T21:02:59,999 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/.tmp/cf/2652152be32845a1acc80780a7365aed 2024-12-16T21:03:00,003 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/.tmp/cf/2652152be32845a1acc80780a7365aed as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/cf/2652152be32845a1acc80780a7365aed 2024-12-16T21:03:00,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742245_1421 (size=8462) 2024-12-16T21:03:00,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742245_1421 (size=8462) 2024-12-16T21:03:00,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742245_1421 (size=8462) 2024-12-16T21:03:00,006 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/.tmp/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac 2024-12-16T21:03:00,007 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/cf/2652152be32845a1acc80780a7365aed, entries=1, sequenceid=6, filesize=5.0 K 2024-12-16T21:03:00,008 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(3040): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 49b5609b2a32ef56940986b9d60dd932 in 32ms, sequenceid=6, compaction requested=false 2024-12-16T21:03:00,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2538): Flush status journal for 49b5609b2a32ef56940986b9d60dd932: 2024-12-16T21:03:00,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. for snaptb0-testExportWithChecksum completed. 2024-12-16T21:03:00,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-16T21:03:00,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:03:00,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/cf/2652152be32845a1acc80780a7365aed] hfiles 2024-12-16T21:03:00,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/cf/2652152be32845a1acc80780a7365aed for snapshot=snaptb0-testExportWithChecksum 2024-12-16T21:03:00,010 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/.tmp/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac 2024-12-16T21:03:00,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742247_1423 (size=107) 2024-12-16T21:03:00,014 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac, entries=49, sequenceid=6, filesize=8.3 K 2024-12-16T21:03:00,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742247_1423 (size=107) 2024-12-16T21:03:00,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742247_1423 (size=107) 2024-12-16T21:03:00,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 
2024-12-16T21:03:00,014 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for bd59e4492efad7b56f6a95b7ff86005c in 38ms, sequenceid=6, compaction requested=false 2024-12-16T21:03:00,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189 2024-12-16T21:03:00,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for bd59e4492efad7b56f6a95b7ff86005c: 2024-12-16T21:03:00,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. for snaptb0-testExportWithChecksum completed. 2024-12-16T21:03:00,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-16T21:03:00,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:03:00,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac] hfiles 2024-12-16T21:03:00,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac for snapshot=snaptb0-testExportWithChecksum 2024-12-16T21:03:00,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=189 2024-12-16T21:03:00,015 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:03:00,015 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:03:00,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, ppid=188, state=SUCCESS; SnapshotRegionProcedure 49b5609b2a32ef56940986b9d60dd932 in 191 msec 2024-12-16T21:03:00,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742248_1424 (size=107) 2024-12-16T21:03:00,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742248_1424 (size=107) 2024-12-16T21:03:00,020 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742248_1424 (size=107) 2024-12-16T21:03:00,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:03:00,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-16T21:03:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-12-16T21:03:00,021 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:03:00,021 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:03:00,023 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=188 2024-12-16T21:03:00,023 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=188, state=SUCCESS; SnapshotRegionProcedure bd59e4492efad7b56f6a95b7ff86005c in 198 msec 2024-12-16T21:03:00,023 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:03:00,023 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:03:00,024 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:03:00,024 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-16T21:03:00,025 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-16T21:03:00,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742249_1425 (size=621) 2024-12-16T21:03:00,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742249_1425 (size=621) 2024-12-16T21:03:00,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742249_1425 (size=621) 2024-12-16T21:03:00,033 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:03:00,037 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:03:00,037 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-16T21:03:00,038 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:03:00,038 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-16T21:03:00,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 225 msec 2024-12-16T21:03:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-16T21:03:00,116 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 188 completed 2024-12-16T21:03:00,116 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382980116 2024-12-16T21:03:00,116 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382980116, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382980116, srcFsUri=hdfs://localhost:37263, srcDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:03:00,145 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37263, inputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:03:00,145 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@383a8f10, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382980116, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382980116/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-16T21:03:00,146 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T21:03:00,149 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382980116/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-16T21:03:00,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:00,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:00,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:00,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:00,871 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-16T21:03:00,871 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-16T21:03:00,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-16T21:03:01,141 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-3698755633264396955.jar 2024-12-16T21:03:01,141 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:01,142 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:01,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-6193598144430544526.jar 2024-12-16T21:03:01,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:01,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:01,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:01,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:01,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:01,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:01,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T21:03:01,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T21:03:01,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T21:03:01,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T21:03:01,211 DEBUG [Time-limited 
test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T21:03:01,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T21:03:01,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T21:03:01,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T21:03:01,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T21:03:01,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T21:03:01,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T21:03:01,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T21:03:01,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:03:01,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:03:01,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:03:01,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:03:01,214 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:03:01,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:03:01,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:03:01,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742250_1426 (size=127628) 2024-12-16T21:03:01,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742250_1426 (size=127628) 2024-12-16T21:03:01,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742250_1426 (size=127628) 2024-12-16T21:03:01,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742251_1427 (size=2172137) 2024-12-16T21:03:01,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742251_1427 (size=2172137) 2024-12-16T21:03:01,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742251_1427 (size=2172137) 2024-12-16T21:03:01,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742252_1428 (size=213228) 2024-12-16T21:03:01,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742252_1428 (size=213228) 2024-12-16T21:03:01,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742252_1428 (size=213228) 2024-12-16T21:03:01,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742253_1429 (size=1877034) 2024-12-16T21:03:01,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742253_1429 (size=1877034) 2024-12-16T21:03:01,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742253_1429 (size=1877034) 2024-12-16T21:03:01,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742254_1430 (size=533455) 2024-12-16T21:03:01,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742254_1430 (size=533455) 2024-12-16T21:03:01,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44571 is added to blk_1073742254_1430 (size=533455) 2024-12-16T21:03:01,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742255_1431 (size=7280644) 2024-12-16T21:03:01,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742255_1431 (size=7280644) 2024-12-16T21:03:01,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742255_1431 (size=7280644) 2024-12-16T21:03:01,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742256_1432 (size=4188619) 2024-12-16T21:03:01,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742256_1432 (size=4188619) 2024-12-16T21:03:01,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742256_1432 (size=4188619) 2024-12-16T21:03:01,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742257_1433 (size=20406) 2024-12-16T21:03:01,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742257_1433 (size=20406) 2024-12-16T21:03:01,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742257_1433 (size=20406) 2024-12-16T21:03:01,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742258_1434 (size=75495) 2024-12-16T21:03:01,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742258_1434 (size=75495) 2024-12-16T21:03:01,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742258_1434 (size=75495) 2024-12-16T21:03:01,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742259_1435 (size=451756) 2024-12-16T21:03:01,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742259_1435 (size=451756) 2024-12-16T21:03:01,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742259_1435 (size=451756) 2024-12-16T21:03:01,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742260_1436 (size=45609) 2024-12-16T21:03:01,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742260_1436 (size=45609) 2024-12-16T21:03:01,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742260_1436 (size=45609) 2024-12-16T21:03:01,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742261_1437 (size=110084) 2024-12-16T21:03:01,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44571 is added to blk_1073742261_1437 (size=110084) 2024-12-16T21:03:01,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742261_1437 (size=110084) 2024-12-16T21:03:01,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742262_1438 (size=1323991) 2024-12-16T21:03:01,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742262_1438 (size=1323991) 2024-12-16T21:03:01,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742262_1438 (size=1323991) 2024-12-16T21:03:01,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742263_1439 (size=23076) 2024-12-16T21:03:01,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742263_1439 (size=23076) 2024-12-16T21:03:01,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742263_1439 (size=23076) 2024-12-16T21:03:01,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742264_1440 (size=126803) 2024-12-16T21:03:01,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742264_1440 (size=126803) 2024-12-16T21:03:01,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742264_1440 (size=126803) 2024-12-16T21:03:01,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742265_1441 (size=322274) 2024-12-16T21:03:01,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742265_1441 (size=322274) 2024-12-16T21:03:01,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742265_1441 (size=322274) 2024-12-16T21:03:01,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742266_1442 (size=1832290) 2024-12-16T21:03:01,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742266_1442 (size=1832290) 2024-12-16T21:03:01,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742266_1442 (size=1832290) 2024-12-16T21:03:01,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742267_1443 (size=30081) 2024-12-16T21:03:01,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742267_1443 (size=30081) 2024-12-16T21:03:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742267_1443 (size=30081) 2024-12-16T21:03:01,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742268_1444 (size=53616) 2024-12-16T21:03:01,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742268_1444 (size=53616) 2024-12-16T21:03:01,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742268_1444 (size=53616) 2024-12-16T21:03:01,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742269_1445 (size=29229) 2024-12-16T21:03:01,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742269_1445 (size=29229) 2024-12-16T21:03:01,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742269_1445 (size=29229) 2024-12-16T21:03:01,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742270_1446 (size=169089) 2024-12-16T21:03:01,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742270_1446 (size=169089) 2024-12-16T21:03:01,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742270_1446 (size=169089) 2024-12-16T21:03:01,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742271_1447 (size=6350920) 2024-12-16T21:03:01,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742271_1447 (size=6350920) 2024-12-16T21:03:01,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742271_1447 (size=6350920) 2024-12-16T21:03:01,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742272_1448 (size=5175431) 2024-12-16T21:03:01,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742272_1448 (size=5175431) 2024-12-16T21:03:01,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742272_1448 (size=5175431) 2024-12-16T21:03:01,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742273_1449 (size=136454) 2024-12-16T21:03:01,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742273_1449 (size=136454) 2024-12-16T21:03:01,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742273_1449 (size=136454) 2024-12-16T21:03:01,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742274_1450 (size=907465) 2024-12-16T21:03:01,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742274_1450 (size=907465) 2024-12-16T21:03:01,503 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742274_1450 (size=907465) 2024-12-16T21:03:01,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742275_1451 (size=3317408) 2024-12-16T21:03:01,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742275_1451 (size=3317408) 2024-12-16T21:03:01,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742275_1451 (size=3317408) 2024-12-16T21:03:01,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742276_1452 (size=503880) 2024-12-16T21:03:01,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742276_1452 (size=503880) 2024-12-16T21:03:01,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742276_1452 (size=503880) 2024-12-16T21:03:01,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742277_1453 (size=4695811) 2024-12-16T21:03:01,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742277_1453 (size=4695811) 2024-12-16T21:03:01,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742277_1453 (size=4695811) 2024-12-16T21:03:01,548 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
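The JobResourceUploader warning just above ("No job jar file set. User classes may not be found. See Job or Job#setJar(String).") refers to the standard Hadoop MapReduce Job API: a driver normally registers the jar containing its user classes so they can be localized to task containers. In this test the surrounding TableMapReduceUtil lines show dependency jars being shipped separately, so the warning is typically benign here. A minimal sketch of the call the warning points at, with a hypothetical driver class and jar path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class ExportDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "snapshot-export");
        // Point the job at the jar containing the driver/mapper classes...
        job.setJarByClass(ExportDriver.class);
        // ...or set an explicit jar path instead (hypothetical location):
        // job.setJar("/path/to/job.jar");
      }
    }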
2024-12-16T21:03:01,549 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-16T21:03:01,551 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-16T21:03:01,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742278_1454 (size=338) 2024-12-16T21:03:01,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742278_1454 (size=338) 2024-12-16T21:03:01,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742278_1454 (size=338) 2024-12-16T21:03:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742279_1455 (size=15) 2024-12-16T21:03:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742279_1455 (size=15) 2024-12-16T21:03:01,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742279_1455 (size=15) 2024-12-16T21:03:01,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742280_1456 (size=304929) 2024-12-16T21:03:01,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742280_1456 (size=304929) 2024-12-16T21:03:01,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742280_1456 (size=304929) 2024-12-16T21:03:02,561 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:03:02,561 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T21:03:02,564 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0007_000001 (auth:SIMPLE) from 127.0.0.1:37026 2024-12-16T21:03:02,580 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_1/usercache/jenkins/appcache/application_1734382828217_0007/container_1734382828217_0007_01_000001/launch_container.sh] 2024-12-16T21:03:02,580 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_1/usercache/jenkins/appcache/application_1734382828217_0007/container_1734382828217_0007_01_000001/container_tokens] 2024-12-16T21:03:02,580 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_1/usercache/jenkins/appcache/application_1734382828217_0007/container_1734382828217_0007_01_000001/sysfs] 2024-12-16T21:03:02,666 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0008_000001 (auth:SIMPLE) from 127.0.0.1:32880 2024-12-16T21:03:04,067 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:03:08,307 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0008_000001 (auth:SIMPLE) from 127.0.0.1:34446 2024-12-16T21:03:08,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742281_1457 (size=350603) 2024-12-16T21:03:08,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742281_1457 (size=350603) 2024-12-16T21:03:08,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742281_1457 (size=350603) 2024-12-16T21:03:10,564 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0008_000001 (auth:SIMPLE) from 127.0.0.1:41916 2024-12-16T21:03:14,093 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000002/launch_container.sh] 
2024-12-16T21:03:14,093 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000002/container_tokens] 2024-12-16T21:03:14,093 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382980116/archive/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-16T21:03:15,400 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0008_000001 (auth:SIMPLE) from 127.0.0.1:35388 2024-12-16T21:03:18,379 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000003/launch_container.sh] 2024-12-16T21:03:18,379 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000003/container_tokens] 2024-12-16T21:03:18,379 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382980116/archive/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. 
Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-16T21:03:19,283 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T21:03:19,411 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0008_000001 (auth:SIMPLE) from 127.0.0.1:41930 2024-12-16T21:03:20,180 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 3f3089e09ebc056cacee8e9b5d95cda7, had cached 0 bytes from a total of 8392 2024-12-16T21:03:20,180 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 5700cdf8b31b01a35a4ac5bf632afccd, had cached 0 bytes from a total of 5216 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/local-export-1734382980116/archive/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-16T21:03:22,770 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000004/launch_container.sh] 2024-12-16T21:03:22,770 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000004/container_tokens] 2024-12-16T21:03:22,770 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000004/sysfs] 2024-12-16T21:03:24,425 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0008_000001 (auth:SIMPLE) from 127.0.0.1:45990 2024-12-16T21:03:24,537 DEBUG [master/6b259cf15929:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region bd59e4492efad7b56f6a95b7ff86005c changed from -1.0 to 0.0, refreshing cache 2024-12-16T21:03:24,537 DEBUG [master/6b259cf15929:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 3f3089e09ebc056cacee8e9b5d95cda7 changed from -1.0 to 0.0, refreshing cache 2024-12-16T21:03:24,537 DEBUG [master/6b259cf15929:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 5700cdf8b31b01a35a4ac5bf632afccd changed from -1.0 to 0.0, refreshing cache 2024-12-16T21:03:24,537 DEBUG [master/6b259cf15929:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 49b5609b2a32ef56940986b9d60dd932 changed from -1.0 to 0.0, refreshing cache 2024-12-16T21:03:27,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 
is added to blk_1073742282_1458 (size=21340) 2024-12-16T21:03:27,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742282_1458 (size=21340) 2024-12-16T21:03:27,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742282_1458 (size=21340) 2024-12-16T21:03:27,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742283_1459 (size=460) 2024-12-16T21:03:27,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742283_1459 (size=460) 2024-12-16T21:03:27,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742283_1459 (size=460) 2024-12-16T21:03:27,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742284_1460 (size=21340) 2024-12-16T21:03:27,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742284_1460 (size=21340) 2024-12-16T21:03:27,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742284_1460 (size=21340) 2024-12-16T21:03:27,618 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_0/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000005/launch_container.sh] 2024-12-16T21:03:27,618 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_0/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000005/container_tokens] 2024-12-16T21:03:27,618 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_0/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000005/sysfs] 2024-12-16T21:03:27,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742285_1461 (size=350603) 2024-12-16T21:03:27,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742285_1461 (size=350603) 2024-12-16T21:03:27,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742285_1461 (size=350603) 2024-12-16T21:03:27,632 INFO [Socket Reader #1 for 
port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0008_000001 (auth:SIMPLE) from 127.0.0.1:45996 2024-12-16T21:03:28,771 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1227): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1734382828217_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:935) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1204) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:353) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T21:03:28,772 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383008772 2024-12-16T21:03:28,772 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37263, tgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383008772, rawTgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383008772, srcFsUri=hdfs://localhost:37263, srcDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:03:28,799 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37263, inputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:03:28,799 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383008772, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383008772/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-16T21:03:28,801 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
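The repeated checksum-mismatch failures above arise because the snapshot is exported from HDFS to a local file: filesystem, and the two filesystems use different checksum algorithms, which is exactly what the error message says can be handled either with file-level COMPOSITE_CRC checksums or by skipping verification. A minimal sketch, under those assumptions, of re-running ExportSnapshot with the options the message names, mirroring the ToolRunner invocation visible in the stack trace (the destination path is hypothetical; the snapshot name is the one from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportWithChecksumOptions {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // File-level CRC comparison stays valid across filesystems with different
        // block sizes or checksum algorithms.
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int ret = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export"   // hypothetical destination
            // Alternative: add "-no-checksum-verify" to skip verification entirely,
            // at the cost of not detecting corruption during the copy.
        });
        System.exit(ret);
      }
    }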
2024-12-16T21:03:28,804 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383008772/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-16T21:03:28,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742286_1462 (size=156) 2024-12-16T21:03:28,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742286_1462 (size=156) 2024-12-16T21:03:28,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742287_1463 (size=621) 2024-12-16T21:03:28,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742287_1463 (size=621) 2024-12-16T21:03:28,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742286_1462 (size=156) 2024-12-16T21:03:28,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742287_1463 (size=621) 2024-12-16T21:03:28,814 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:28,815 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:28,815 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:28,815 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:29,824 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-16242737477398332062.jar 2024-12-16T21:03:29,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:29,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:29,895 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-4781764408956537506.jar 2024-12-16T21:03:29,896 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:29,896 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:29,896 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:29,896 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:29,897 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:29,897 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:29,897 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T21:03:29,897 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T21:03:29,897 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T21:03:29,897 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T21:03:29,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 
2024-12-16T21:03:29,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T21:03:29,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T21:03:29,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T21:03:29,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T21:03:29,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T21:03:29,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T21:03:29,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T21:03:29,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:03:29,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:03:29,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:03:29,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:03:29,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-16T21:03:29,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:03:29,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:03:29,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742288_1464 (size=6350920) 2024-12-16T21:03:29,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742288_1464 (size=6350920) 2024-12-16T21:03:29,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742288_1464 (size=6350920) 2024-12-16T21:03:29,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742289_1465 (size=127628) 2024-12-16T21:03:29,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742289_1465 (size=127628) 2024-12-16T21:03:29,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742289_1465 (size=127628) 2024-12-16T21:03:29,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742290_1466 (size=2172137) 2024-12-16T21:03:29,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742290_1466 (size=2172137) 2024-12-16T21:03:29,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742290_1466 (size=2172137) 2024-12-16T21:03:30,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742291_1467 (size=213228) 2024-12-16T21:03:30,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742291_1467 (size=213228) 2024-12-16T21:03:30,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742291_1467 (size=213228) 2024-12-16T21:03:30,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742292_1468 (size=1877034) 2024-12-16T21:03:30,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742292_1468 (size=1877034) 2024-12-16T21:03:30,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742292_1468 (size=1877034) 2024-12-16T21:03:30,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742293_1469 (size=533455) 
2024-12-16T21:03:30,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742293_1469 (size=533455) 2024-12-16T21:03:30,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742293_1469 (size=533455) 2024-12-16T21:03:30,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742294_1470 (size=7280644) 2024-12-16T21:03:30,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742294_1470 (size=7280644) 2024-12-16T21:03:30,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742294_1470 (size=7280644) 2024-12-16T21:03:30,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742295_1471 (size=4188619) 2024-12-16T21:03:30,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742295_1471 (size=4188619) 2024-12-16T21:03:30,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742295_1471 (size=4188619) 2024-12-16T21:03:30,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742296_1472 (size=20406) 2024-12-16T21:03:30,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742296_1472 (size=20406) 2024-12-16T21:03:30,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742296_1472 (size=20406) 2024-12-16T21:03:30,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742297_1473 (size=75495) 2024-12-16T21:03:30,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742297_1473 (size=75495) 2024-12-16T21:03:30,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742297_1473 (size=75495) 2024-12-16T21:03:30,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742298_1474 (size=45609) 2024-12-16T21:03:30,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742298_1474 (size=45609) 2024-12-16T21:03:30,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742298_1474 (size=45609) 2024-12-16T21:03:30,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742299_1475 (size=110084) 2024-12-16T21:03:30,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742299_1475 (size=110084) 2024-12-16T21:03:30,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742299_1475 
(size=110084) 2024-12-16T21:03:30,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742300_1476 (size=451756) 2024-12-16T21:03:30,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742300_1476 (size=451756) 2024-12-16T21:03:30,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742300_1476 (size=451756) 2024-12-16T21:03:30,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742301_1477 (size=1323991) 2024-12-16T21:03:30,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742301_1477 (size=1323991) 2024-12-16T21:03:30,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742301_1477 (size=1323991) 2024-12-16T21:03:30,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742302_1478 (size=23076) 2024-12-16T21:03:30,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742302_1478 (size=23076) 2024-12-16T21:03:30,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742302_1478 (size=23076) 2024-12-16T21:03:30,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742303_1479 (size=126803) 2024-12-16T21:03:30,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742303_1479 (size=126803) 2024-12-16T21:03:30,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742303_1479 (size=126803) 2024-12-16T21:03:30,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742304_1480 (size=322274) 2024-12-16T21:03:30,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742304_1480 (size=322274) 2024-12-16T21:03:30,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742304_1480 (size=322274) 2024-12-16T21:03:30,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742305_1481 (size=1832290) 2024-12-16T21:03:30,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742305_1481 (size=1832290) 2024-12-16T21:03:30,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742305_1481 (size=1832290) 2024-12-16T21:03:30,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742306_1482 (size=30081) 2024-12-16T21:03:30,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to 
blk_1073742306_1482 (size=30081) 2024-12-16T21:03:30,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742306_1482 (size=30081) 2024-12-16T21:03:30,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742307_1483 (size=53616) 2024-12-16T21:03:30,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742307_1483 (size=53616) 2024-12-16T21:03:30,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742307_1483 (size=53616) 2024-12-16T21:03:30,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742308_1484 (size=29229) 2024-12-16T21:03:30,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742308_1484 (size=29229) 2024-12-16T21:03:30,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742308_1484 (size=29229) 2024-12-16T21:03:30,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742309_1485 (size=169089) 2024-12-16T21:03:30,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742309_1485 (size=169089) 2024-12-16T21:03:30,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742309_1485 (size=169089) 2024-12-16T21:03:30,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742310_1486 (size=5175431) 2024-12-16T21:03:30,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742310_1486 (size=5175431) 2024-12-16T21:03:30,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742310_1486 (size=5175431) 2024-12-16T21:03:30,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742311_1487 (size=136454) 2024-12-16T21:03:30,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742311_1487 (size=136454) 2024-12-16T21:03:30,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742311_1487 (size=136454) 2024-12-16T21:03:30,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742312_1488 (size=907465) 2024-12-16T21:03:30,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742312_1488 (size=907465) 2024-12-16T21:03:30,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742312_1488 (size=907465) 2024-12-16T21:03:30,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to 
blk_1073742313_1489 (size=3317408) 2024-12-16T21:03:30,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742313_1489 (size=3317408) 2024-12-16T21:03:30,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742313_1489 (size=3317408) 2024-12-16T21:03:30,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742314_1490 (size=503880) 2024-12-16T21:03:30,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742314_1490 (size=503880) 2024-12-16T21:03:30,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742314_1490 (size=503880) 2024-12-16T21:03:30,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742315_1491 (size=4695811) 2024-12-16T21:03:30,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742315_1491 (size=4695811) 2024-12-16T21:03:30,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742315_1491 (size=4695811) 2024-12-16T21:03:30,354 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-16T21:03:30,355 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-16T21:03:30,357 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-16T21:03:30,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742316_1492 (size=338) 2024-12-16T21:03:30,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742316_1492 (size=338) 2024-12-16T21:03:30,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742316_1492 (size=338) 2024-12-16T21:03:30,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742317_1493 (size=15) 2024-12-16T21:03:30,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742317_1493 (size=15) 2024-12-16T21:03:30,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742317_1493 (size=15) 2024-12-16T21:03:30,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742318_1494 (size=304881) 2024-12-16T21:03:30,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742318_1494 (size=304881) 2024-12-16T21:03:30,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742318_1494 (size=304881) 2024-12-16T21:03:33,702 WARN [SchedulerEventDispatcher:Event Processor {}] 
capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:03:33,702 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:03:33,705 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0008_000001 (auth:SIMPLE) from 127.0.0.1:34352 2024-12-16T21:03:33,719 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000001/launch_container.sh] 2024-12-16T21:03:33,719 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000001/container_tokens] 2024-12-16T21:03:33,719 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0008/container_1734382828217_0008_01_000001/sysfs] 2024-12-16T21:03:34,610 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0009_000001 (auth:SIMPLE) from 127.0.0.1:57192 2024-12-16T21:03:40,149 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0009_000001 (auth:SIMPLE) from 127.0.0.1:37302 2024-12-16T21:03:40,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742319_1495 (size=350555) 2024-12-16T21:03:40,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742319_1495 (size=350555) 2024-12-16T21:03:40,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742319_1495 (size=350555) 2024-12-16T21:03:42,425 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0009_000001 (auth:SIMPLE) from 127.0.0.1:36504 2024-12-16T21:03:44,185 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region bd59e4492efad7b56f6a95b7ff86005c, had cached 0 bytes from a total of 8462 2024-12-16T21:03:44,185 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 49b5609b2a32ef56940986b9d60dd932, had cached 0 bytes from a total of 5149 2024-12-16T21:03:45,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742320_1496 (size=8462) 2024-12-16T21:03:45,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742320_1496 (size=8462) 2024-12-16T21:03:45,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742320_1496 (size=8462) 2024-12-16T21:03:45,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742321_1497 (size=5149) 2024-12-16T21:03:45,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742321_1497 (size=5149) 2024-12-16T21:03:45,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742321_1497 (size=5149) 2024-12-16T21:03:45,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742322_1498 (size=17413) 2024-12-16T21:03:45,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742322_1498 (size=17413) 2024-12-16T21:03:45,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742322_1498 (size=17413) 2024-12-16T21:03:45,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742323_1499 (size=462) 2024-12-16T21:03:45,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742323_1499 (size=462) 2024-12-16T21:03:45,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742323_1499 (size=462) 2024-12-16T21:03:45,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742324_1500 (size=17413) 2024-12-16T21:03:45,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742324_1500 (size=17413) 2024-12-16T21:03:45,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742324_1500 (size=17413) 2024-12-16T21:03:45,836 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_2/usercache/jenkins/appcache/application_1734382828217_0009/container_1734382828217_0009_01_000002/launch_container.sh] 2024-12-16T21:03:45,836 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_2/usercache/jenkins/appcache/application_1734382828217_0009/container_1734382828217_0009_01_000002/container_tokens] 2024-12-16T21:03:45,836 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_2/usercache/jenkins/appcache/application_1734382828217_0009/container_1734382828217_0009_01_000002/sysfs] 2024-12-16T21:03:45,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742325_1501 (size=350555) 2024-12-16T21:03:45,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742325_1501 (size=350555) 2024-12-16T21:03:45,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742325_1501 (size=350555) 2024-12-16T21:03:45,853 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0009_000001 (auth:SIMPLE) from 127.0.0.1:36506 2024-12-16T21:03:47,524 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T21:03:47,525 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-16T21:03:47,531 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportWithChecksum 2024-12-16T21:03:47,531 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T21:03:47,531 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T21:03:47,531 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-16T21:03:47,532 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-16T21:03:47,532 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-16T21:03:47,532 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383008772/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383008772/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-16T21:03:47,532 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383008772/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-16T21:03:47,532 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383008772/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-16T21:03:47,538 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithChecksum 2024-12-16T21:03:47,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-16T21:03:47,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-16T21:03:47,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-16T21:03:47,540 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734383027540"}]},"ts":"1734383027540"} 2024-12-16T21:03:47,541 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-16T21:03:47,544 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithChecksum to state=DISABLING 2024-12-16T21:03:47,544 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-16T21:03:47,545 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=49b5609b2a32ef56940986b9d60dd932, UNASSIGN}, {pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=bd59e4492efad7b56f6a95b7ff86005c, UNASSIGN}] 2024-12-16T21:03:47,546 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=bd59e4492efad7b56f6a95b7ff86005c, UNASSIGN 2024-12-16T21:03:47,546 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=49b5609b2a32ef56940986b9d60dd932, UNASSIGN 2024-12-16T21:03:47,547 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=bd59e4492efad7b56f6a95b7ff86005c, regionState=CLOSING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:03:47,547 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=49b5609b2a32ef56940986b9d60dd932, regionState=CLOSING, regionLocation=6b259cf15929,42323,1734382821139 2024-12-16T21:03:47,548 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:03:47,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=195, ppid=194, state=RUNNABLE; CloseRegionProcedure bd59e4492efad7b56f6a95b7ff86005c, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:03:47,548 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:03:47,548 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=193, state=RUNNABLE; CloseRegionProcedure 49b5609b2a32ef56940986b9d60dd932, server=6b259cf15929,42323,1734382821139}] 2024-12-16T21:03:47,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-16T21:03:47,700 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,42323,1734382821139 2024-12-16T21:03:47,700 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:03:47,700 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(124): Close 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:03:47,700 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(124): Close bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:03:47,700 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] 
handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:03:47,700 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:03:47,700 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1681): Closing bd59e4492efad7b56f6a95b7ff86005c, disabling compactions & flushes 2024-12-16T21:03:47,700 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1681): Closing 49b5609b2a32ef56940986b9d60dd932, disabling compactions & flushes 2024-12-16T21:03:47,700 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:03:47,700 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 2024-12-16T21:03:47,700 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:03:47,700 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. after waiting 0 ms 2024-12-16T21:03:47,700 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 2024-12-16T21:03:47,700 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:03:47,700 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. after waiting 0 ms 2024-12-16T21:03:47,700 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 
2024-12-16T21:03:47,705 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:03:47,705 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:03:47,705 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:03:47,705 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:03:47,705 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c. 2024-12-16T21:03:47,705 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932. 2024-12-16T21:03:47,705 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1635): Region close journal for bd59e4492efad7b56f6a95b7ff86005c: 2024-12-16T21:03:47,705 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1635): Region close journal for 49b5609b2a32ef56940986b9d60dd932: 2024-12-16T21:03:47,707 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(170): Closed bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:03:47,707 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=bd59e4492efad7b56f6a95b7ff86005c, regionState=CLOSED 2024-12-16T21:03:47,707 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(170): Closed 49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:03:47,707 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=49b5609b2a32ef56940986b9d60dd932, regionState=CLOSED 2024-12-16T21:03:47,710 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=195, resume processing ppid=194 2024-12-16T21:03:47,710 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=193 2024-12-16T21:03:47,710 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, ppid=194, state=SUCCESS; CloseRegionProcedure bd59e4492efad7b56f6a95b7ff86005c, server=6b259cf15929,36861,1734382821216 in 160 msec 2024-12-16T21:03:47,711 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=193, state=SUCCESS; CloseRegionProcedure 49b5609b2a32ef56940986b9d60dd932, server=6b259cf15929,42323,1734382821139 in 161 msec 2024-12-16T21:03:47,711 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=bd59e4492efad7b56f6a95b7ff86005c, UNASSIGN in 165 msec 2024-12-16T21:03:47,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=193, resume processing ppid=192 2024-12-16T21:03:47,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=49b5609b2a32ef56940986b9d60dd932, UNASSIGN in 165 msec 2024-12-16T21:03:47,713 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-16T21:03:47,713 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 168 msec 2024-12-16T21:03:47,714 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734383027714"}]},"ts":"1734383027714"} 2024-12-16T21:03:47,715 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-16T21:03:47,717 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-16T21:03:47,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithChecksum in 179 msec 2024-12-16T21:03:47,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-16T21:03:47,842 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum, procId: 191 completed 2024-12-16T21:03:47,842 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-16T21:03:47,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T21:03:47,844 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T21:03:47,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-16T21:03:47,844 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T21:03:47,845 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-16T21:03:47,847 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932 
2024-12-16T21:03:47,847 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:03:47,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T21:03:47,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T21:03:47,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T21:03:47,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T21:03:47,849 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-16T21:03:47,849 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-16T21:03:47,849 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/recovered.edits] 2024-12-16T21:03:47,849 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-16T21:03:47,849 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-16T21:03:47,850 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/recovered.edits] 2024-12-16T21:03:47,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T21:03:47,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 
2024-12-16T21:03:47,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T21:03:47,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:03:47,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T21:03:47,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:03:47,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T21:03:47,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-16T21:03:47,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:03:47,853 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:47,853 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:47,853 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:47,854 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:47,859 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/cf/a7935e9cc6394b6bb4d2dfd83b2df1ac 2024-12-16T21:03:47,859 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/cf/2652152be32845a1acc80780a7365aed to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/cf/2652152be32845a1acc80780a7365aed 2024-12-16T21:03:47,862 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c/recovered.edits/9.seqid 2024-12-16T21:03:47,863 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/bd59e4492efad7b56f6a95b7ff86005c 2024-12-16T21:03:47,868 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932/recovered.edits/9.seqid 2024-12-16T21:03:47,868 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportWithChecksum/49b5609b2a32ef56940986b9d60dd932 2024-12-16T21:03:47,869 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-16T21:03:47,882 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T21:03:47,884 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-16T21:03:47,887 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-16T21:03:47,888 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T21:03:47,888 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithChecksum' from region states. 
2024-12-16T21:03:47,888 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734383027888"}]},"ts":"9223372036854775807"} 2024-12-16T21:03:47,888 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734383027888"}]},"ts":"9223372036854775807"} 2024-12-16T21:03:47,891 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T21:03:47,891 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 49b5609b2a32ef56940986b9d60dd932, NAME => 'testtb-testExportWithChecksum,,1734382978843.49b5609b2a32ef56940986b9d60dd932.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => bd59e4492efad7b56f6a95b7ff86005c, NAME => 'testtb-testExportWithChecksum,1,1734382978843.bd59e4492efad7b56f6a95b7ff86005c.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T21:03:47,891 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-16T21:03:47,891 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734383027891"}]},"ts":"9223372036854775807"} 2024-12-16T21:03:47,903 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithChecksum state from META 2024-12-16T21:03:47,906 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T21:03:47,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithChecksum in 64 msec 2024-12-16T21:03:47,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-16T21:03:47,952 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum, procId: 197 completed 2024-12-16T21:03:47,957 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" 2024-12-16T21:03:47,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-16T21:03:47,960 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" 2024-12-16T21:03:47,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-16T21:03:47,982 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=810 (was 813), OpenFileDescriptor=811 (was 823), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=400 (was 449), ProcessCount=17 (was 17), AvailableMemoryMB=2833 (was 3281) 2024-12-16T21:03:47,982 WARN [Time-limited test {}] 
hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-12-16T21:03:48,001 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=810, OpenFileDescriptor=811, MaxFileDescriptor=1048576, SystemLoadAverage=400, ProcessCount=17, AvailableMemoryMB=2833 2024-12-16T21:03:48,001 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-12-16T21:03:48,003 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T21:03:48,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:48,004 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T21:03:48,005 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:03:48,005 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 198 2024-12-16T21:03:48,005 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T21:03:48,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-16T21:03:48,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742326_1502 (size=418) 2024-12-16T21:03:48,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742326_1502 (size=418) 2024-12-16T21:03:48,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742326_1502 (size=418) 2024-12-16T21:03:48,013 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6f189cc9aa16f8410beebe8f514f65fe, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:03:48,014 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => fb7ea73bc3c7323f3a0cafeb76228700, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:03:48,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742327_1503 (size=79) 2024-12-16T21:03:48,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742327_1503 (size=79) 2024-12-16T21:03:48,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742327_1503 (size=79) 2024-12-16T21:03:48,027 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:03:48,027 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1681): Closing fb7ea73bc3c7323f3a0cafeb76228700, disabling compactions & flushes 2024-12-16T21:03:48,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742328_1504 (size=79) 2024-12-16T21:03:48,027 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:03:48,027 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:03:48,027 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. after waiting 0 ms 2024-12-16T21:03:48,027 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 
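
The entries above show the master running CreateTableProcedure pid=198 for 'testtb-testExportFileSystemStateWithSkipTmp', laying out two regions (split at key '1') with a single 'cf' family. Purely as a hedged illustration, and not code taken from this test, a client-side request that would drive such a procedure through the HBase 2.x Admin API might look roughly like the sketch below; the class name and the default connection configuration are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper class; mirrors the table layout reported in the log above,
// not the actual test source.
public class CreateSkipTmpTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      // Single 'cf' family with VERSIONS => '1' and BLOCKSIZE => '65536', as in the descriptor above.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .setBlocksize(64 * 1024)
          .build();
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
          .setRegionReplication(1)   // REGION_REPLICATION => '1'
          .setColumnFamily(cf)
          .build();
      // One split key at '1' yields the two regions (''..'1' and '1'..'') created above.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}

The master answers such a request with a procedure id (pid=198 here), and the client then polls until the procedure completes, which is what the repeated MasterRpcServices(1305) "Checking to see if procedure is done pid=198" entries in this log correspond to.
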
2024-12-16T21:03:48,028 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:03:48,028 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1635): Region close journal for fb7ea73bc3c7323f3a0cafeb76228700: 2024-12-16T21:03:48,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742328_1504 (size=79) 2024-12-16T21:03:48,028 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:03:48,028 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1681): Closing 6f189cc9aa16f8410beebe8f514f65fe, disabling compactions & flushes 2024-12-16T21:03:48,028 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 2024-12-16T21:03:48,028 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 2024-12-16T21:03:48,028 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. after waiting 0 ms 2024-12-16T21:03:48,028 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 2024-12-16T21:03:48,028 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 
2024-12-16T21:03:48,028 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6f189cc9aa16f8410beebe8f514f65fe: 2024-12-16T21:03:48,029 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T21:03:48,029 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734383028029"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734383028029"}]},"ts":"1734383028029"} 2024-12-16T21:03:48,029 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734383028029"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734383028029"}]},"ts":"1734383028029"} 2024-12-16T21:03:48,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742328_1504 (size=79) 2024-12-16T21:03:48,031 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T21:03:48,032 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T21:03:48,032 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734383028032"}]},"ts":"1734383028032"} 2024-12-16T21:03:48,033 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-16T21:03:48,036 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {6b259cf15929=0} racks are {/default-rack=0} 2024-12-16T21:03:48,037 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T21:03:48,037 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T21:03:48,037 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T21:03:48,037 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T21:03:48,037 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T21:03:48,037 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T21:03:48,037 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T21:03:48,038 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6f189cc9aa16f8410beebe8f514f65fe, ASSIGN}, {pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithSkipTmp, region=fb7ea73bc3c7323f3a0cafeb76228700, ASSIGN}] 2024-12-16T21:03:48,038 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=fb7ea73bc3c7323f3a0cafeb76228700, ASSIGN 2024-12-16T21:03:48,038 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6f189cc9aa16f8410beebe8f514f65fe, ASSIGN 2024-12-16T21:03:48,039 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=fb7ea73bc3c7323f3a0cafeb76228700, ASSIGN; state=OFFLINE, location=6b259cf15929,41713,1734382821274; forceNewPlan=false, retain=false 2024-12-16T21:03:48,039 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6f189cc9aa16f8410beebe8f514f65fe, ASSIGN; state=OFFLINE, location=6b259cf15929,36861,1734382821216; forceNewPlan=false, retain=false 2024-12-16T21:03:48,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-16T21:03:48,189 INFO [6b259cf15929:34343 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T21:03:48,189 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=6f189cc9aa16f8410beebe8f514f65fe, regionState=OPENING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:03:48,189 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=fb7ea73bc3c7323f3a0cafeb76228700, regionState=OPENING, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:03:48,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=201, ppid=199, state=RUNNABLE; OpenRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:03:48,191 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=202, ppid=200, state=RUNNABLE; OpenRegionProcedure fb7ea73bc3c7323f3a0cafeb76228700, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:03:48,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-16T21:03:48,342 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:03:48,343 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:03:48,345 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 
2024-12-16T21:03:48,345 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7285): Opening region: {ENCODED => 6f189cc9aa16f8410beebe8f514f65fe, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T21:03:48,345 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. service=AccessControlService 2024-12-16T21:03:48,346 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T21:03:48,346 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:48,346 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:03:48,346 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7327): checking encryption for 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:48,346 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7330): checking classloading for 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:48,346 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:03:48,346 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7285): Opening region: {ENCODED => fb7ea73bc3c7323f3a0cafeb76228700, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T21:03:48,347 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. service=AccessControlService 2024-12-16T21:03:48,347 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
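
Each region open above registers the AccessControlService coprocessor, and a few entries further down the create procedure writes the table owner's permission (jenkins: RWXCA) before the ZKPermissionWatcher fans it out to every server. As a hedged illustration of that ACL state only (not part of the test code), the resulting grants could be read back through AccessControlClient; the class name below is hypothetical and the plain table name is passed as the table-regex argument.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

// Hypothetical snippet: list the ACL entries the AccessController maintains for the
// test table, e.g. the "jenkins: RWXCA" owner grant written after table creation.
public class ListTestTableAcls {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportFileSystemStateWithSkipTmp");
      perms.forEach(p -> System.out.println(p));  // expect an entry equivalent to jenkins: RWXCA
    }
  }
}
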
2024-12-16T21:03:48,347 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:48,347 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T21:03:48,347 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7327): checking encryption for fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:48,347 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7330): checking classloading for fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:48,347 INFO [StoreOpener-6f189cc9aa16f8410beebe8f514f65fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:48,348 INFO [StoreOpener-fb7ea73bc3c7323f3a0cafeb76228700-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:48,348 INFO [StoreOpener-6f189cc9aa16f8410beebe8f514f65fe-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6f189cc9aa16f8410beebe8f514f65fe columnFamilyName cf 2024-12-16T21:03:48,349 DEBUG [StoreOpener-6f189cc9aa16f8410beebe8f514f65fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:03:48,349 INFO [StoreOpener-6f189cc9aa16f8410beebe8f514f65fe-1 {}] regionserver.HStore(327): Store=6f189cc9aa16f8410beebe8f514f65fe/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:03:48,349 INFO [StoreOpener-fb7ea73bc3c7323f3a0cafeb76228700-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fb7ea73bc3c7323f3a0cafeb76228700 columnFamilyName cf 2024-12-16T21:03:48,349 DEBUG [StoreOpener-fb7ea73bc3c7323f3a0cafeb76228700-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T21:03:48,349 INFO [StoreOpener-fb7ea73bc3c7323f3a0cafeb76228700-1 {}] regionserver.HStore(327): Store=fb7ea73bc3c7323f3a0cafeb76228700/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T21:03:48,350 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:48,350 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:48,350 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:48,350 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:48,351 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1085): writing seq id for 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:48,352 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1085): writing seq id for fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:48,353 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:03:48,353 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T21:03:48,353 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 
{event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1102): Opened 6f189cc9aa16f8410beebe8f514f65fe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62475422, jitterRate=-0.06904366612434387}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:03:48,353 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1102): Opened fb7ea73bc3c7323f3a0cafeb76228700; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62604736, jitterRate=-0.06711673736572266}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T21:03:48,354 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1001): Region open journal for 6f189cc9aa16f8410beebe8f514f65fe: 2024-12-16T21:03:48,354 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1001): Region open journal for fb7ea73bc3c7323f3a0cafeb76228700: 2024-12-16T21:03:48,354 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700., pid=202, masterSystemTime=1734383028343 2024-12-16T21:03:48,354 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe., pid=201, masterSystemTime=1734383028342 2024-12-16T21:03:48,355 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:03:48,355 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:03:48,356 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=fb7ea73bc3c7323f3a0cafeb76228700, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:03:48,356 DEBUG [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 2024-12-16T21:03:48,356 INFO [RS_OPEN_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 
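
At this point both regions have been opened (6f189cc9aa16f8410beebe8f514f65fe on 6b259cf15929,36861 and fb7ea73bc3c7323f3a0cafeb76228700 on 6b259cf15929,41713) and the post-open deploy tasks have updated hbase:meta. In the spirit of the HBaseTestingUtility assignment wait that appears further below, a hedged sketch of verifying the assignments from a client could look like this; the class and method names are assumptions.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

// Hypothetical helper: fetch every region location for the test table and make sure
// each region has been assigned to some region server.
public final class RegionAssignmentCheck {
  static void assertRegionsAssigned(Connection conn) throws IOException {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (RegionLocator locator = conn.getRegionLocator(name)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        if (loc.getServerName() == null) {
          throw new IllegalStateException("Unassigned region: " + loc.getRegion());
        }
      }
    }
  }
}
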
2024-12-16T21:03:48,356 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=6f189cc9aa16f8410beebe8f514f65fe, regionState=OPEN, openSeqNum=2, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:03:48,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=202, resume processing ppid=200 2024-12-16T21:03:48,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=201, resume processing ppid=199 2024-12-16T21:03:48,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=201, ppid=199, state=SUCCESS; OpenRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe, server=6b259cf15929,36861,1734382821216 in 167 msec 2024-12-16T21:03:48,359 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=202, ppid=200, state=SUCCESS; OpenRegionProcedure fb7ea73bc3c7323f3a0cafeb76228700, server=6b259cf15929,41713,1734382821274 in 166 msec 2024-12-16T21:03:48,359 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=200, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=fb7ea73bc3c7323f3a0cafeb76228700, ASSIGN in 320 msec 2024-12-16T21:03:48,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=199, resume processing ppid=198 2024-12-16T21:03:48,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6f189cc9aa16f8410beebe8f514f65fe, ASSIGN in 322 msec 2024-12-16T21:03:48,360 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T21:03:48,361 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734383028360"}]},"ts":"1734383028360"} 2024-12-16T21:03:48,361 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-16T21:03:48,363 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T21:03:48,364 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-16T21:03:48,365 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-16T21:03:48,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:03:48,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:03:48,370 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:03:48,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:03:48,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:48,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:48,373 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:48,373 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:48,373 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:48,373 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:48,373 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:48,373 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-16T21:03:48,373 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 369 msec 2024-12-16T21:03:48,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-16T21:03:48,607 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 198 completed 2024-12-16T21:03:48,608 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table 
testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-16T21:03:48,608 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:03:48,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41713 {}] regionserver.StoreScanner(1133): Switch to stream read (scanned=32795 bytes) of info 2024-12-16T21:03:48,614 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-16T21:03:48,614 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:03:48,614 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-16T21:03:48,616 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-16T21:03:48,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734383028616 (current time:1734383028616). 2024-12-16T21:03:48,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:03:48,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-16T21:03:48,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:03:48,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41ce0e00 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@319d3297 2024-12-16T21:03:48,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51102c5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:03:48,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:03:48,622 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:03:48,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41ce0e00 to 127.0.0.1:61742 2024-12-16T21:03:48,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:03:48,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e1b4f36 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, 
retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36cad1e8 2024-12-16T21:03:48,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66e869a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:03:48,627 DEBUG [hconnection-0x1ea2f382-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:03:48,628 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:03:48,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:03:48,630 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:03:48,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e1b4f36 to 127.0.0.1:61742 2024-12-16T21:03:48,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:03:48,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-16T21:03:48,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
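
The entries above show the client requesting a FLUSH-type snapshot named emptySnaptb0-testExportFileSystemStateWithSkipTmp and the SnapshotManager confirming no snapshot of that name exists before storing a SnapshotProcedure. As a hedged sketch of the corresponding client-side calls (not taken from the test source), taking such a snapshot and later deleting it, as the SnapshotManager "Deleting snapshot" entries at the start of this section show for the previous test, could look like this; the wrapper class and method name are assumptions.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

// Illustrative only: take a flush snapshot of the test table, then clean it up.
public final class SnapshotLifecycleSketch {
  static void snapshotAndCleanUp(Admin admin) throws IOException {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    // Equivalent to the "type=FLUSH" request logged by MasterRpcServices(1703).
    admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp", table, SnapshotType.FLUSH);
    // Later teardown, mirroring the SnapshotManager "Deleting snapshot" entries.
    admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
  }
}
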
2024-12-16T21:03:48,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-16T21:03:48,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-16T21:03:48,632 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:03:48,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-16T21:03:48,633 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:03:48,635 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:03:48,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742329_1505 (size=203) 2024-12-16T21:03:48,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742329_1505 (size=203) 2024-12-16T21:03:48,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742329_1505 (size=203) 2024-12-16T21:03:48,643 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:03:48,643 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe}, {pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure fb7ea73bc3c7323f3a0cafeb76228700}] 2024-12-16T21:03:48,644 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:48,644 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 
fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:48,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-16T21:03:48,795 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:03:48,795 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:03:48,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-16T21:03:48,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2538): Flush status journal for 6f189cc9aa16f8410beebe8f514f65fe: 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2538): Flush status journal for fb7ea73bc3c7323f3a0cafeb76228700: 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:03:48,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T21:03:48,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742330_1506 (size=82) 2024-12-16T21:03:48,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742330_1506 (size=82) 2024-12-16T21:03:48,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742330_1506 (size=82) 2024-12-16T21:03:48,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742331_1507 (size=82) 2024-12-16T21:03:48,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742331_1507 (size=82) 2024-12-16T21:03:48,807 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:03:48,807 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-16T21:03:48,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742331_1507 (size=82) 2024-12-16T21:03:48,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=205 2024-12-16T21:03:48,807 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:48,807 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 
2024-12-16T21:03:48,807 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-16T21:03:48,807 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:48,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=204 2024-12-16T21:03:48,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:48,808 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:48,809 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=205, ppid=203, state=SUCCESS; SnapshotRegionProcedure fb7ea73bc3c7323f3a0cafeb76228700 in 165 msec 2024-12-16T21:03:48,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=204, resume processing ppid=203 2024-12-16T21:03:48,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=204, ppid=203, state=SUCCESS; SnapshotRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe in 165 msec 2024-12-16T21:03:48,810 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:03:48,810 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:03:48,811 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:03:48,811 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:48,811 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:48,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742332_1508 (size=585) 2024-12-16T21:03:48,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742332_1508 (size=585) 2024-12-16T21:03:48,818 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742332_1508 (size=585) 2024-12-16T21:03:48,822 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:03:48,825 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:03:48,826 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:48,826 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:03:48,827 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-16T21:03:48,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=203, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 196 msec 2024-12-16T21:03:48,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-16T21:03:48,934 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 203 completed 2024-12-16T21:03:48,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T21:03:48,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41713 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. with WAL disabled. Data may be lost in the event of a crash. 
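
With the empty snapshot completed and data now being written to both regions, the test next takes snaptb0-testExportFileSystemStateWithSkipTmp (the request begins just below) and ultimately exports it with the "skip tmp" path enabled. Purely as a hedged sketch, not the test's own code, driving the ExportSnapshot tool programmatically might look as follows; the destination URI is hypothetical and the snapshot.export.skip.tmp key is assumed to be the switch this test exercises.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical driver: export the snapshot to another filesystem without staging
// it under a temporary directory first.
public class ExportSnapshotSkipTmp {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed config key: write directly to the final export location instead of a .tmp dir.
    conf.setBoolean("snapshot.export.skip.tmp", true);
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://backup-cluster:8020/hbase"   // hypothetical destination
    });
    System.exit(rc);
  }
}
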
2024-12-16T21:03:48,943 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:48,943 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 2024-12-16T21:03:48,943 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T21:03:48,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-16T21:03:48,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734383028951 (current time:1734383028951). 2024-12-16T21:03:48,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T21:03:48,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-16T21:03:48,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T21:03:48,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f516122 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e6a1a9 2024-12-16T21:03:48,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ff7306f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:03:48,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:03:48,957 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41268, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:03:48,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f516122 to 127.0.0.1:61742 2024-12-16T21:03:48,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:03:48,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4cb85916 to 127.0.0.1:61742 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f0052c1 2024-12-16T21:03:48,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49a26d06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T21:03:48,963 DEBUG [hconnection-0x428787ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:03:48,964 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41284, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:03:48,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T21:03:48,965 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T21:03:48,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4cb85916 to 127.0.0.1:61742 2024-12-16T21:03:48,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:03:48,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-16T21:03:48,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T21:03:48,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-16T21:03:48,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-16T21:03:48,968 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T21:03:48,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-16T21:03:48,969 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T21:03:48,970 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T21:03:48,979 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742333_1509 (size=198) 2024-12-16T21:03:48,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742333_1509 (size=198) 2024-12-16T21:03:48,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742333_1509 (size=198) 2024-12-16T21:03:48,980 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T21:03:48,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe}, {pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure fb7ea73bc3c7323f3a0cafeb76228700}] 2024-12-16T21:03:48,981 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:48,981 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:49,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-16T21:03:49,132 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:03:49,132 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:03:49,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41713 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208 2024-12-16T21:03:49,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207 2024-12-16T21:03:49,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:03:49,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 
2024-12-16T21:03:49,133 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2837): Flushing 6f189cc9aa16f8410beebe8f514f65fe 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-16T21:03:49,133 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2837): Flushing fb7ea73bc3c7323f3a0cafeb76228700 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-16T21:03:49,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/.tmp/cf/c2a592812e054233b96c92d422092a42 is 71, key is 11102c0abaf738c38d82ff2af47232c4/cf:q/1734383028940/Put/seqid=0 2024-12-16T21:03:49,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/.tmp/cf/2f124ee6810f4c6599769a6403b16c52 is 71, key is 04019b517c578b431b7f71bc59cfb9f5/cf:q/1734383028939/Put/seqid=0 2024-12-16T21:03:49,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742334_1510 (size=5216) 2024-12-16T21:03:49,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742334_1510 (size=5216) 2024-12-16T21:03:49,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742335_1511 (size=8392) 2024-12-16T21:03:49,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742335_1511 (size=8392) 2024-12-16T21:03:49,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742334_1510 (size=5216) 2024-12-16T21:03:49,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742335_1511 (size=8392) 2024-12-16T21:03:49,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-16T21:03:49,283 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-16T21:03:49,567 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/.tmp/cf/2f124ee6810f4c6599769a6403b16c52 2024-12-16T21:03:49,567 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/.tmp/cf/c2a592812e054233b96c92d422092a42 2024-12-16T21:03:49,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-16T21:03:49,571 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/.tmp/cf/c2a592812e054233b96c92d422092a42 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/cf/c2a592812e054233b96c92d422092a42 2024-12-16T21:03:49,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/.tmp/cf/2f124ee6810f4c6599769a6403b16c52 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/cf/2f124ee6810f4c6599769a6403b16c52 2024-12-16T21:03:49,575 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/cf/c2a592812e054233b96c92d422092a42, entries=48, sequenceid=6, filesize=8.2 K 2024-12-16T21:03:49,575 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/cf/2f124ee6810f4c6599769a6403b16c52, entries=2, sequenceid=6, filesize=5.1 K 2024-12-16T21:03:49,576 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 6f189cc9aa16f8410beebe8f514f65fe in 443ms, sequenceid=6, compaction requested=false 2024-12-16T21:03:49,576 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for fb7ea73bc3c7323f3a0cafeb76228700 in 443ms, sequenceid=6, compaction requested=false 2024-12-16T21:03:49,576 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-16T21:03:49,576 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2538): Flush status journal for 6f189cc9aa16f8410beebe8f514f65fe: 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2538): Flush status journal for fb7ea73bc3c7323f3a0cafeb76228700: 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/cf/2f124ee6810f4c6599769a6403b16c52] hfiles 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/cf/c2a592812e054233b96c92d422092a42] hfiles 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/cf/2f124ee6810f4c6599769a6403b16c52 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:49,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/cf/c2a592812e054233b96c92d422092a42 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:49,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742337_1513 (size=121) 2024-12-16T21:03:49,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742337_1513 (size=121) 2024-12-16T21:03:49,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742337_1513 (size=121) 2024-12-16T21:03:49,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 
2024-12-16T21:03:49,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207 2024-12-16T21:03:49,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=207 2024-12-16T21:03:49,589 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:49,590 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:03:49,591 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=207, ppid=206, state=SUCCESS; SnapshotRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe in 610 msec 2024-12-16T21:03:49,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742336_1512 (size=121) 2024-12-16T21:03:49,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742336_1512 (size=121) 2024-12-16T21:03:49,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742336_1512 (size=121) 2024-12-16T21:03:49,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 
2024-12-16T21:03:49,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6b259cf15929:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208 2024-12-16T21:03:49,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster(4106): Remote procedure done, pid=208 2024-12-16T21:03:49,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:49,594 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:03:49,596 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=208, resume processing ppid=206 2024-12-16T21:03:49,596 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=208, ppid=206, state=SUCCESS; SnapshotRegionProcedure fb7ea73bc3c7323f3a0cafeb76228700 in 614 msec 2024-12-16T21:03:49,596 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T21:03:49,596 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T21:03:49,596 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T21:03:49,597 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:49,597 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:49,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742338_1514 (size=663) 2024-12-16T21:03:49,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742338_1514 (size=663) 2024-12-16T21:03:49,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742338_1514 (size=663) 2024-12-16T21:03:49,605 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp 
type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T21:03:49,609 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T21:03:49,609 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:49,610 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T21:03:49,610 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-16T21:03:49,611 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=206, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 643 msec 2024-12-16T21:03:50,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-16T21:03:50,071 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 206 completed 2024-12-16T21:03:50,071 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383030071 2024-12-16T21:03:50,071 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:37263, tgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383030071, rawTgtDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383030071, srcFsUri=hdfs://localhost:37263, srcDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:03:50,100 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:37263, inputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba 2024-12-16T21:03:50,100 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383030071, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383030071/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:50,101 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T21:03:50,105 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383030071/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:50,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742339_1515 (size=198) 2024-12-16T21:03:50,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742339_1515 (size=198) 2024-12-16T21:03:50,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742339_1515 (size=198) 2024-12-16T21:03:50,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742340_1516 (size=663) 2024-12-16T21:03:50,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742340_1516 (size=663) 2024-12-16T21:03:50,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742340_1516 (size=663) 2024-12-16T21:03:50,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:50,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:50,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:50,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:50,871 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:03:50,871 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-16T21:03:50,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-16T21:03:51,084 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-6299241876986162662.jar 2024-12-16T21:03:51,084 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:51,084 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:51,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop-6494735461283948404.jar 2024-12-16T21:03:51,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:51,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:51,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:51,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:51,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:51,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T21:03:51,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T21:03:51,153 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T21:03:51,153 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T21:03:51,153 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T21:03:51,153 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T21:03:51,153 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T21:03:51,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T21:03:51,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T21:03:51,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T21:03:51,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T21:03:51,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T21:03:51,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T21:03:51,155 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:03:51,155 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:03:51,155 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:03:51,156 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:03:51,156 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T21:03:51,156 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:03:51,156 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T21:03:51,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742341_1517 (size=127628) 2024-12-16T21:03:51,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742341_1517 (size=127628) 2024-12-16T21:03:51,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742341_1517 (size=127628) 2024-12-16T21:03:51,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742342_1518 (size=2172137) 2024-12-16T21:03:51,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742342_1518 (size=2172137) 2024-12-16T21:03:51,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742342_1518 (size=2172137) 2024-12-16T21:03:51,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742343_1519 (size=213228) 2024-12-16T21:03:51,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742343_1519 (size=213228) 2024-12-16T21:03:51,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742343_1519 (size=213228) 2024-12-16T21:03:51,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added 
to blk_1073742344_1520 (size=1877034) 2024-12-16T21:03:51,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742344_1520 (size=1877034) 2024-12-16T21:03:51,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742344_1520 (size=1877034) 2024-12-16T21:03:51,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742345_1521 (size=533455) 2024-12-16T21:03:51,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742345_1521 (size=533455) 2024-12-16T21:03:51,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742345_1521 (size=533455) 2024-12-16T21:03:51,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742346_1522 (size=7280644) 2024-12-16T21:03:51,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742346_1522 (size=7280644) 2024-12-16T21:03:51,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742346_1522 (size=7280644) 2024-12-16T21:03:51,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742347_1523 (size=4188619) 2024-12-16T21:03:51,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742347_1523 (size=4188619) 2024-12-16T21:03:51,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742347_1523 (size=4188619) 2024-12-16T21:03:51,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742348_1524 (size=20406) 2024-12-16T21:03:51,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742348_1524 (size=20406) 2024-12-16T21:03:51,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742348_1524 (size=20406) 2024-12-16T21:03:51,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742349_1525 (size=75495) 2024-12-16T21:03:51,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742349_1525 (size=75495) 2024-12-16T21:03:51,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742349_1525 (size=75495) 2024-12-16T21:03:51,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742350_1526 (size=45609) 2024-12-16T21:03:51,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742350_1526 (size=45609) 2024-12-16T21:03:51,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is 
added to blk_1073742350_1526 (size=45609) 2024-12-16T21:03:51,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742351_1527 (size=110084) 2024-12-16T21:03:51,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742351_1527 (size=110084) 2024-12-16T21:03:51,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742351_1527 (size=110084) 2024-12-16T21:03:51,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742352_1528 (size=1323991) 2024-12-16T21:03:51,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742352_1528 (size=1323991) 2024-12-16T21:03:51,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742352_1528 (size=1323991) 2024-12-16T21:03:51,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742353_1529 (size=23076) 2024-12-16T21:03:51,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742353_1529 (size=23076) 2024-12-16T21:03:51,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742353_1529 (size=23076) 2024-12-16T21:03:51,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742354_1530 (size=126803) 2024-12-16T21:03:51,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742354_1530 (size=126803) 2024-12-16T21:03:51,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742354_1530 (size=126803) 2024-12-16T21:03:51,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742355_1531 (size=322274) 2024-12-16T21:03:51,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742355_1531 (size=322274) 2024-12-16T21:03:51,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742355_1531 (size=322274) 2024-12-16T21:03:51,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742356_1532 (size=6350920) 2024-12-16T21:03:51,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742356_1532 (size=6350920) 2024-12-16T21:03:51,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742356_1532 (size=6350920) 2024-12-16T21:03:51,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742357_1533 (size=1832290) 2024-12-16T21:03:51,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33147 is added to blk_1073742357_1533 (size=1832290) 2024-12-16T21:03:51,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742357_1533 (size=1832290) 2024-12-16T21:03:51,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742358_1534 (size=30081) 2024-12-16T21:03:51,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742358_1534 (size=30081) 2024-12-16T21:03:51,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742358_1534 (size=30081) 2024-12-16T21:03:51,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742359_1535 (size=53616) 2024-12-16T21:03:51,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742359_1535 (size=53616) 2024-12-16T21:03:51,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742359_1535 (size=53616) 2024-12-16T21:03:51,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742360_1536 (size=29229) 2024-12-16T21:03:51,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742360_1536 (size=29229) 2024-12-16T21:03:51,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742360_1536 (size=29229) 2024-12-16T21:03:51,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742361_1537 (size=169089) 2024-12-16T21:03:51,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742361_1537 (size=169089) 2024-12-16T21:03:51,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742361_1537 (size=169089) 2024-12-16T21:03:51,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742362_1538 (size=451756) 2024-12-16T21:03:51,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742362_1538 (size=451756) 2024-12-16T21:03:51,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742362_1538 (size=451756) 2024-12-16T21:03:51,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742363_1539 (size=5175431) 2024-12-16T21:03:51,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742363_1539 (size=5175431) 2024-12-16T21:03:51,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742363_1539 (size=5175431) 2024-12-16T21:03:51,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33147 is added to blk_1073742364_1540 (size=136454) 2024-12-16T21:03:51,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742364_1540 (size=136454) 2024-12-16T21:03:51,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742364_1540 (size=136454) 2024-12-16T21:03:51,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742365_1541 (size=907465) 2024-12-16T21:03:51,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742365_1541 (size=907465) 2024-12-16T21:03:51,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742365_1541 (size=907465) 2024-12-16T21:03:51,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742366_1542 (size=3317408) 2024-12-16T21:03:51,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742366_1542 (size=3317408) 2024-12-16T21:03:51,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742366_1542 (size=3317408) 2024-12-16T21:03:51,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742367_1543 (size=503880) 2024-12-16T21:03:51,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742367_1543 (size=503880) 2024-12-16T21:03:51,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742367_1543 (size=503880) 2024-12-16T21:03:51,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742368_1544 (size=4695811) 2024-12-16T21:03:51,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742368_1544 (size=4695811) 2024-12-16T21:03:51,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742368_1544 (size=4695811) 2024-12-16T21:03:51,478 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-16T21:03:51,480 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-16T21:03:51,481 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-16T21:03:51,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742369_1545 (size=366) 2024-12-16T21:03:51,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742369_1545 (size=366) 2024-12-16T21:03:51,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742369_1545 (size=366) 2024-12-16T21:03:51,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742370_1546 (size=15) 2024-12-16T21:03:51,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742370_1546 (size=15) 2024-12-16T21:03:51,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742370_1546 (size=15) 2024-12-16T21:03:51,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742371_1547 (size=305053) 2024-12-16T21:03:51,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742371_1547 (size=305053) 2024-12-16T21:03:51,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742371_1547 (size=305053) 2024-12-16T21:03:51,910 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T21:03:51,910 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T21:03:51,912 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0009_000001 (auth:SIMPLE) from 127.0.0.1:51126 2024-12-16T21:03:51,924 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_2/usercache/jenkins/appcache/application_1734382828217_0009/container_1734382828217_0009_01_000001/launch_container.sh] 2024-12-16T21:03:51,924 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_2/usercache/jenkins/appcache/application_1734382828217_0009/container_1734382828217_0009_01_000001/container_tokens] 2024-12-16T21:03:51,924 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-1_2/usercache/jenkins/appcache/application_1734382828217_0009/container_1734382828217_0009_01_000001/sysfs] 2024-12-16T21:03:52,713 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0010_000001 (auth:SIMPLE) from 127.0.0.1:57966 2024-12-16T21:03:53,236 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:03:58,226 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0010_000001 (auth:SIMPLE) from 127.0.0.1:55820 2024-12-16T21:03:58,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742372_1548 (size=350751) 2024-12-16T21:03:58,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742372_1548 (size=350751) 2024-12-16T21:03:58,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742372_1548 (size=350751) 2024-12-16T21:04:00,492 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0010_000001 (auth:SIMPLE) from 127.0.0.1:56460 2024-12-16T21:04:03,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742373_1549 (size=8392) 2024-12-16T21:04:03,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742373_1549 (size=8392) 2024-12-16T21:04:03,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742373_1549 (size=8392) 
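The export recorded above (hfile list loaded, a single split of 13.3 K, then map containers launched on the MiniMRCluster) is driven by the same ExportSnapshot tool that can be run standalone. A minimal sketch of how such an export is typically invoked via ToolRunner, with a placeholder destination path; the skip-tmp behaviour this test exercises is normally toggled through the snapshot.export.skip.tmp property (assumed here):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Write the exported snapshot directly, skipping the intermediate .tmp
        // directory in the destination (assumed property name).
        conf.setBoolean("snapshot.export.skip.tmp", true);
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://namenode:8020/export-test",  // placeholder destination
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }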
2024-12-16T21:04:03,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742374_1550 (size=5216) 2024-12-16T21:04:03,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742374_1550 (size=5216) 2024-12-16T21:04:03,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742374_1550 (size=5216) 2024-12-16T21:04:03,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742375_1551 (size=17455) 2024-12-16T21:04:03,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742375_1551 (size=17455) 2024-12-16T21:04:03,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742375_1551 (size=17455) 2024-12-16T21:04:03,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742376_1552 (size=476) 2024-12-16T21:04:03,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742376_1552 (size=476) 2024-12-16T21:04:03,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742376_1552 (size=476) 2024-12-16T21:04:03,980 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0010/container_1734382828217_0010_01_000002/launch_container.sh] 2024-12-16T21:04:03,980 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0010/container_1734382828217_0010_01_000002/container_tokens] 2024-12-16T21:04:03,980 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_1/usercache/jenkins/appcache/application_1734382828217_0010/container_1734382828217_0010_01_000002/sysfs] 2024-12-16T21:04:03,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742377_1553 (size=17455) 2024-12-16T21:04:03,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742377_1553 (size=17455) 2024-12-16T21:04:03,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44571 is added to blk_1073742377_1553 (size=17455) 2024-12-16T21:04:04,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742378_1554 (size=350751) 2024-12-16T21:04:04,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742378_1554 (size=350751) 2024-12-16T21:04:04,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742378_1554 (size=350751) 2024-12-16T21:04:04,017 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0010_000001 (auth:SIMPLE) from 127.0.0.1:56466 2024-12-16T21:04:05,180 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 3f3089e09ebc056cacee8e9b5d95cda7, had cached 0 bytes from a total of 8392 2024-12-16T21:04:05,180 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 5700cdf8b31b01a35a4ac5bf632afccd, had cached 0 bytes from a total of 5216 2024-12-16T21:04:05,825 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T21:04:05,825 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T21:04:05,831 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:05,831 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T21:04:05,831 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T21:04:05,831 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:05,831 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-16T21:04:05,831 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-16T21:04:05,831 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-715246216_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383030071/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383030071/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:05,832 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383030071/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-16T21:04:05,832 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/export-test/export-1734383030071/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-16T21:04:05,836 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:05,837 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=209, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-16T21:04:05,839 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734383045838"}]},"ts":"1734383045838"} 2024-12-16T21:04:05,840 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-16T21:04:05,841 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-16T21:04:05,842 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-16T21:04:05,843 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6f189cc9aa16f8410beebe8f514f65fe, UNASSIGN}, {pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=fb7ea73bc3c7323f3a0cafeb76228700, UNASSIGN}] 2024-12-16T21:04:05,843 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6f189cc9aa16f8410beebe8f514f65fe, UNASSIGN 2024-12-16T21:04:05,843 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=fb7ea73bc3c7323f3a0cafeb76228700, UNASSIGN 2024-12-16T21:04:05,844 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=6f189cc9aa16f8410beebe8f514f65fe, regionState=CLOSING, regionLocation=6b259cf15929,36861,1734382821216 2024-12-16T21:04:05,844 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=fb7ea73bc3c7323f3a0cafeb76228700, regionState=CLOSING, 
regionLocation=6b259cf15929,41713,1734382821274 2024-12-16T21:04:05,845 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:04:05,845 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=213, ppid=211, state=RUNNABLE; CloseRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe, server=6b259cf15929,36861,1734382821216}] 2024-12-16T21:04:05,845 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T21:04:05,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=214, ppid=212, state=RUNNABLE; CloseRegionProcedure fb7ea73bc3c7323f3a0cafeb76228700, server=6b259cf15929,41713,1734382821274}] 2024-12-16T21:04:05,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-16T21:04:05,996 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6b259cf15929,41713,1734382821274 2024-12-16T21:04:05,996 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6b259cf15929,36861,1734382821216 2024-12-16T21:04:05,996 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(124): Close 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:04:05,996 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(124): Close fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:04:05,996 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:04:05,996 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T21:04:05,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1681): Closing 6f189cc9aa16f8410beebe8f514f65fe, disabling compactions & flushes 2024-12-16T21:04:05,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1681): Closing fb7ea73bc3c7323f3a0cafeb76228700, disabling compactions & flushes 2024-12-16T21:04:05,997 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 2024-12-16T21:04:05,997 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:04:05,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 
2024-12-16T21:04:05,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 2024-12-16T21:04:05,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. after waiting 0 ms 2024-12-16T21:04:05,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. after waiting 0 ms 2024-12-16T21:04:05,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:04:05,997 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 2024-12-16T21:04:06,001 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:04:06,001 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:04:06,001 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:04:06,001 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:04:06,001 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700. 2024-12-16T21:04:06,001 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe. 
2024-12-16T21:04:06,001 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1635): Region close journal for fb7ea73bc3c7323f3a0cafeb76228700: 2024-12-16T21:04:06,002 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1635): Region close journal for 6f189cc9aa16f8410beebe8f514f65fe: 2024-12-16T21:04:06,003 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(170): Closed 6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:04:06,003 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=6f189cc9aa16f8410beebe8f514f65fe, regionState=CLOSED 2024-12-16T21:04:06,003 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(170): Closed fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:04:06,003 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=fb7ea73bc3c7323f3a0cafeb76228700, regionState=CLOSED 2024-12-16T21:04:06,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=213, resume processing ppid=211 2024-12-16T21:04:06,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=213, ppid=211, state=SUCCESS; CloseRegionProcedure 6f189cc9aa16f8410beebe8f514f65fe, server=6b259cf15929,36861,1734382821216 in 159 msec 2024-12-16T21:04:06,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=214, resume processing ppid=212 2024-12-16T21:04:06,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=214, ppid=212, state=SUCCESS; CloseRegionProcedure fb7ea73bc3c7323f3a0cafeb76228700, server=6b259cf15929,41713,1734382821274 in 159 msec 2024-12-16T21:04:06,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=211, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=6f189cc9aa16f8410beebe8f514f65fe, UNASSIGN in 162 msec 2024-12-16T21:04:06,007 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=212, resume processing ppid=210 2024-12-16T21:04:06,007 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=212, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=fb7ea73bc3c7323f3a0cafeb76228700, UNASSIGN in 163 msec 2024-12-16T21:04:06,008 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=210, resume processing ppid=209 2024-12-16T21:04:06,008 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=210, ppid=209, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 165 msec 2024-12-16T21:04:06,009 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734383046009"}]},"ts":"1734383046009"} 2024-12-16T21:04:06,010 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-16T21:04:06,012 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-16T21:04:06,013 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): 
Finished pid=209, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 175 msec 2024-12-16T21:04:06,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-16T21:04:06,140 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 209 completed 2024-12-16T21:04:06,140 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] procedure2.ProcedureExecutor(1098): Stored pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,141 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,142 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=215, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,143 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42323 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,145 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:04:06,145 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:04:06,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,146 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-16T21:04:06,146 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-16T21:04:06,146 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-16T21:04:06,146 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-16T21:04:06,147 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/recovered.edits] 2024-12-16T21:04:06,147 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/cf, FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/recovered.edits] 2024-12-16T21:04:06,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:04:06,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:04:06,148 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:04:06,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T21:04:06,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-16T21:04:06,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:04:06,149 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:04:06,149 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:04:06,149 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T21:04:06,151 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/cf/2f124ee6810f4c6599769a6403b16c52 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/cf/2f124ee6810f4c6599769a6403b16c52 2024-12-16T21:04:06,151 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/cf/c2a592812e054233b96c92d422092a42 to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/cf/c2a592812e054233b96c92d422092a42 2024-12-16T21:04:06,153 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe/recovered.edits/9.seqid 2024-12-16T21:04:06,153 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/recovered.edits/9.seqid to hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700/recovered.edits/9.seqid 2024-12-16T21:04:06,154 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/fb7ea73bc3c7323f3a0cafeb76228700 2024-12-16T21:04:06,154 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testtb-testExportFileSystemStateWithSkipTmp/6f189cc9aa16f8410beebe8f514f65fe 2024-12-16T21:04:06,154 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-16T21:04:06,155 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=215, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,157 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-16T21:04:06,159 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-16T21:04:06,160 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=215, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,160 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-16T21:04:06,160 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734383046160"}]},"ts":"9223372036854775807"} 2024-12-16T21:04:06,160 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734383046160"}]},"ts":"9223372036854775807"} 2024-12-16T21:04:06,161 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T21:04:06,161 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6f189cc9aa16f8410beebe8f514f65fe, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734383028003.6f189cc9aa16f8410beebe8f514f65fe.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => fb7ea73bc3c7323f3a0cafeb76228700, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734383028003.fb7ea73bc3c7323f3a0cafeb76228700.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T21:04:06,161 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
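The cleanup running here (disable of testtb-testExportFileSystemStateWithSkipTmp via pid=209, DeleteTableProcedure pid=215, followed below by deletion of the two snapshots) matches the usual client-side teardown sequence. A minimal sketch of that sequence against the HBase Admin API, assuming a connection to the test cluster can be created from the default configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotTestTeardown {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
          admin.disableTable(table);   // DisableTableProcedure, as in pid=209 above
          admin.deleteTable(table);    // DeleteTableProcedure, as in pid=215
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
          admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
        }
      }
    }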
2024-12-16T21:04:06,161 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734383046161"}]},"ts":"9223372036854775807"} 2024-12-16T21:04:06,162 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-16T21:04:06,164 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=215, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=215, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 24 msec 2024-12-16T21:04:06,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-16T21:04:06,249 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 215 completed 2024-12-16T21:04:06,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-16T21:04:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,255 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-16T21:04:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:06,275 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=814 (was 810) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-51 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:51200 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41213 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-52 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-49 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1409570251_1 at /127.0.0.1:51180 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1409570251_1 at /127.0.0.1:42700 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:45552 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7612 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) 
java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x70b78df7-shared-pool-50 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1891196410) connection to localhost/127.0.0.1:41213 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 15581) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-715246216_22 at /127.0.0.1:42728 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=811 (was 811), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=430 (was 400) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=2771 (was 2833) 2024-12-16T21:04:06,275 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-12-16T21:04:06,275 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2861): Stopping mini mapreduce cluster... 2024-12-16T21:04:06,282 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4792a562{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-16T21:04:06,284 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5cad8bdd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T21:04:06,284 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T21:04:06,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b38e25f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-16T21:04:06,285 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b029b2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,STOPPED} 2024-12-16T21:04:10,079 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734382828217_0010_000001 (auth:SIMPLE) from 127.0.0.1:56482 2024-12-16T21:04:10,090 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_2/usercache/jenkins/appcache/application_1734382828217_0010/container_1734382828217_0010_01_000001/launch_container.sh] 2024-12-16T21:04:10,090 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_2/usercache/jenkins/appcache/application_1734382828217_0010/container_1734382828217_0010_01_000001/container_tokens] 2024-12-16T21:04:10,090 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/MiniMRCluster_2035536497/yarn-189705687/MiniMRCluster_2035536497-localDir-nm-0_2/usercache/jenkins/appcache/application_1734382828217_0010/container_1734382828217_0010_01_000001/sysfs] 2024-12-16T21:04:10,871 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T21:04:11,521 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T21:04:19,283 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T21:04:23,296 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6b8e94fd{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-16T21:04:23,297 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1cd22596{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T21:04:23,297 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T21:04:23,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e6d339c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-16T21:04:23,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f570563{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,STOPPED} 2024-12-16T21:04:40,304 ERROR [Thread[Thread-417,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-16T21:04:40,305 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c30d1c{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-16T21:04:40,305 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64e87700{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T21:04:40,305 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T21:04:40,305 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@268d8bfd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-16T21:04:40,306 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@39328466{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,STOPPED} 2024-12-16T21:04:40,308 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-16T21:04:40,313 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-16T21:04:40,313 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-16T21:04:40,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741830_1006 (size=946741) 2024-12-16T21:04:40,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741830_1006 (size=946741) 2024-12-16T21:04:40,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741830_1006 (size=946741) 2024-12-16T21:04:40,318 ERROR [Thread[Thread-440,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-16T21:04:40,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@31993999{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-16T21:04:40,322 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@49999e44{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T21:04:40,322 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T21:04:40,322 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15266e52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-16T21:04:40,322 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a809efb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,STOPPED} 2024-12-16T21:04:40,324 ERROR [Thread[Thread-399,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-16T21:04:40,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2864): Mini mapreduce cluster stopped 2024-12-16T21:04:40,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-16T21:04:40,325 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-16T21:04:40,325 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x75e74344 to 127.0.0.1:61742 2024-12-16T21:04:40,325 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:04:40,325 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-16T21:04:40,325 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1525585108, stopped=false 2024-12-16T21:04:40,326 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:04:40,326 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-16T21:04:40,326 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=6b259cf15929,34343,1734382820356 2024-12-16T21:04:40,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T21:04:40,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T21:04:40,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T21:04:40,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:04:40,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:04:40,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:04:40,327 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-16T21:04:40,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T21:04:40,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:04:40,328 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:04:40,328 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6b259cf15929,42323,1734382821139' ***** 2024-12-16T21:04:40,328 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T21:04:40,328 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): 
regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T21:04:40,328 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T21:04:40,328 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:04:40,328 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-16T21:04:40,328 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6b259cf15929,36861,1734382821216' ***** 2024-12-16T21:04:40,328 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T21:04:40,329 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:04:40,329 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-16T21:04:40,329 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6b259cf15929,41713,1734382821274' ***** 2024-12-16T21:04:40,329 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:04:40,329 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-16T21:04:40,329 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-16T21:04:40,329 INFO [RS:1;6b259cf15929:36861 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-16T21:04:40,329 INFO [RS:0;6b259cf15929:42323 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-16T21:04:40,329 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-16T21:04:40,329 INFO [RS:1;6b259cf15929:36861 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-16T21:04:40,329 INFO [RS:2;6b259cf15929:41713 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-16T21:04:40,329 INFO [RS:0;6b259cf15929:42323 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-16T21:04:40,329 INFO [RS:1;6b259cf15929:36861 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-16T21:04:40,329 INFO [RS:2;6b259cf15929:41713 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-16T21:04:40,329 INFO [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(1224): stopping server 6b259cf15929,36861,1734382821216 2024-12-16T21:04:40,329 DEBUG [RS:1;6b259cf15929:36861 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:04:40,329 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(3579): Received CLOSE for 3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:04:40,329 INFO [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(1250): stopping server 6b259cf15929,36861,1734382821216; all regions closed. 
2024-12-16T21:04:40,329 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(3579): Received CLOSE for 5700cdf8b31b01a35a4ac5bf632afccd 2024-12-16T21:04:40,329 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1224): stopping server 6b259cf15929,41713,1734382821274 2024-12-16T21:04:40,329 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-16T21:04:40,329 DEBUG [RS:2;6b259cf15929:41713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:04:40,329 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(3579): Received CLOSE for 5ff33ba35f8500425d07b6bfd353cd16 2024-12-16T21:04:40,330 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(3579): Received CLOSE for ffdc28263b845c2b16a120a0817fe8a5 2024-12-16T21:04:40,330 INFO [RS:2;6b259cf15929:41713 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-16T21:04:40,330 INFO [RS:2;6b259cf15929:41713 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-16T21:04:40,330 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1224): stopping server 6b259cf15929,42323,1734382821139 2024-12-16T21:04:40,330 INFO [RS:2;6b259cf15929:41713 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-16T21:04:40,330 DEBUG [RS:0;6b259cf15929:42323 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:04:40,330 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-16T21:04:40,330 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-16T21:04:40,330 DEBUG [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1603): Online Regions={5700cdf8b31b01a35a4ac5bf632afccd=testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd., 5ff33ba35f8500425d07b6bfd353cd16=hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16., ffdc28263b845c2b16a120a0817fe8a5=hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5.} 2024-12-16T21:04:40,330 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-16T21:04:40,330 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3f3089e09ebc056cacee8e9b5d95cda7, disabling compactions & flushes 2024-12-16T21:04:40,330 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 2024-12-16T21:04:40,330 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-16T21:04:40,330 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 2024-12-16T21:04:40,330 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-16T21:04:40,330 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 
after waiting 0 ms 2024-12-16T21:04:40,330 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 2024-12-16T21:04:40,330 DEBUG [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1603): Online Regions={3f3089e09ebc056cacee8e9b5d95cda7=testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7., 1588230740=hbase:meta,,1.1588230740} 2024-12-16T21:04:40,330 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-16T21:04:40,330 INFO [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-16T21:04:40,330 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-16T21:04:40,330 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-16T21:04:40,330 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-16T21:04:40,331 INFO [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=68.66 KB heapSize=109 KB 2024-12-16T21:04:40,332 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 5700cdf8b31b01a35a4ac5bf632afccd, disabling compactions & flushes 2024-12-16T21:04:40,332 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:04:40,332 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:04:40,332 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. after waiting 0 ms 2024-12-16T21:04:40,333 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 
2024-12-16T21:04:40,334 DEBUG [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1629): Waiting on 5700cdf8b31b01a35a4ac5bf632afccd, 5ff33ba35f8500425d07b6bfd353cd16, ffdc28263b845c2b16a120a0817fe8a5 2024-12-16T21:04:40,334 DEBUG [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3f3089e09ebc056cacee8e9b5d95cda7 2024-12-16T21:04:40,335 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/3f3089e09ebc056cacee8e9b5d95cda7/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T21:04:40,335 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:04:40,335 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 2024-12-16T21:04:40,335 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3f3089e09ebc056cacee8e9b5d95cda7: 2024-12-16T21:04:40,335 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7. 2024-12-16T21:04:40,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741834_1010 (size=8083) 2024-12-16T21:04:40,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741834_1010 (size=8083) 2024-12-16T21:04:40,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741834_1010 (size=8083) 2024-12-16T21:04:40,337 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/default/testExportExpiredSnapshot/5700cdf8b31b01a35a4ac5bf632afccd/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T21:04:40,338 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:04:40,338 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 2024-12-16T21:04:40,338 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 5700cdf8b31b01a35a4ac5bf632afccd: 2024-12-16T21:04:40,338 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1734382954843.5700cdf8b31b01a35a4ac5bf632afccd. 
2024-12-16T21:04:40,338 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 5ff33ba35f8500425d07b6bfd353cd16, disabling compactions & flushes 2024-12-16T21:04:40,338 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 2024-12-16T21:04:40,338 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 2024-12-16T21:04:40,338 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. after waiting 0 ms 2024-12-16T21:04:40,338 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 2024-12-16T21:04:40,338 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 5ff33ba35f8500425d07b6bfd353cd16 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-12-16T21:04:40,338 DEBUG [RS:1;6b259cf15929:36861 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/oldWALs 2024-12-16T21:04:40,338 INFO [RS:1;6b259cf15929:36861 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6b259cf15929%2C36861%2C1734382821216:(num 1734382823427) 2024-12-16T21:04:40,338 DEBUG [RS:1;6b259cf15929:36861 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:04:40,339 INFO [RS:1;6b259cf15929:36861 {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T21:04:40,339 INFO [RS:1;6b259cf15929:36861 {}] hbase.ChoreService(370): Chore service for: regionserver/6b259cf15929:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-16T21:04:40,339 INFO [RS:1;6b259cf15929:36861 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-16T21:04:40,339 INFO [RS:1;6b259cf15929:36861 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-16T21:04:40,339 INFO [RS:1;6b259cf15929:36861 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-16T21:04:40,339 INFO [regionserver/6b259cf15929:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-16T21:04:40,339 INFO [RS:1;6b259cf15929:36861 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36861 2024-12-16T21:04:40,344 INFO [regionserver/6b259cf15929:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T21:04:40,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-16T21:04:40,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6b259cf15929,36861,1734382821216 2024-12-16T21:04:40,349 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6b259cf15929,36861,1734382821216] 2024-12-16T21:04:40,349 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 6b259cf15929,36861,1734382821216; numProcessing=1 2024-12-16T21:04:40,350 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/6b259cf15929,36861,1734382821216 already deleted, retry=false 2024-12-16T21:04:40,350 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 6b259cf15929,36861,1734382821216 expired; onlineServers=2 2024-12-16T21:04:40,353 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/acl/5ff33ba35f8500425d07b6bfd353cd16/.tmp/l/1d7fcedd064b44ed8ec1c3f57871a630 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1734382952971/DeleteFamily/seqid=0 2024-12-16T21:04:40,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742379_1555 (size=5695) 2024-12-16T21:04:40,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742379_1555 (size=5695) 2024-12-16T21:04:40,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742379_1555 (size=5695) 2024-12-16T21:04:40,358 INFO [regionserver/6b259cf15929:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T21:04:40,359 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/acl/5ff33ba35f8500425d07b6bfd353cd16/.tmp/l/1d7fcedd064b44ed8ec1c3f57871a630 2024-12-16T21:04:40,362 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/.tmp/info/8aab718f19d248948ae2ab42bf1ac176 is 173, key is testExportExpiredSnapshot,1,1734382954843.3f3089e09ebc056cacee8e9b5d95cda7./info:regioninfo/1734382955190/Put/seqid=0 2024-12-16T21:04:40,363 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom 
(CompoundBloomFilter) metadata for 1d7fcedd064b44ed8ec1c3f57871a630 2024-12-16T21:04:40,364 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/acl/5ff33ba35f8500425d07b6bfd353cd16/.tmp/l/1d7fcedd064b44ed8ec1c3f57871a630 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/acl/5ff33ba35f8500425d07b6bfd353cd16/l/1d7fcedd064b44ed8ec1c3f57871a630 2024-12-16T21:04:40,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742380_1556 (size=15630) 2024-12-16T21:04:40,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742380_1556 (size=15630) 2024-12-16T21:04:40,368 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1d7fcedd064b44ed8ec1c3f57871a630 2024-12-16T21:04:40,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742380_1556 (size=15630) 2024-12-16T21:04:40,368 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/acl/5ff33ba35f8500425d07b6bfd353cd16/l/1d7fcedd064b44ed8ec1c3f57871a630, entries=12, sequenceid=27, filesize=5.6 K 2024-12-16T21:04:40,368 INFO [regionserver/6b259cf15929:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T21:04:40,369 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 5ff33ba35f8500425d07b6bfd353cd16 in 31ms, sequenceid=27, compaction requested=false 2024-12-16T21:04:40,371 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/acl/5ff33ba35f8500425d07b6bfd353cd16/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-12-16T21:04:40,372 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:04:40,372 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 2024-12-16T21:04:40,372 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 5ff33ba35f8500425d07b6bfd353cd16: 2024-12-16T21:04:40,372 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1734382824921.5ff33ba35f8500425d07b6bfd353cd16. 
2024-12-16T21:04:40,372 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing ffdc28263b845c2b16a120a0817fe8a5, disabling compactions & flushes 2024-12-16T21:04:40,372 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 2024-12-16T21:04:40,372 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 2024-12-16T21:04:40,372 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. after waiting 0 ms 2024-12-16T21:04:40,372 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 2024-12-16T21:04:40,372 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing ffdc28263b845c2b16a120a0817fe8a5 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-16T21:04:40,374 INFO [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.26 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/.tmp/info/8aab718f19d248948ae2ab42bf1ac176 2024-12-16T21:04:40,387 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/namespace/ffdc28263b845c2b16a120a0817fe8a5/.tmp/info/ef5918b7db0d4522a9b8a13bc2d62e49 is 45, key is default/info:d/1734382824848/Put/seqid=0 2024-12-16T21:04:40,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742381_1557 (size=5037) 2024-12-16T21:04:40,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742381_1557 (size=5037) 2024-12-16T21:04:40,391 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/.tmp/rep_barrier/8f0b1491728445169bab2d381256045e is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921./rep_barrier:/1734382952988/DeleteFamily/seqid=0 2024-12-16T21:04:40,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742381_1557 (size=5037) 2024-12-16T21:04:40,392 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/namespace/ffdc28263b845c2b16a120a0817fe8a5/.tmp/info/ef5918b7db0d4522a9b8a13bc2d62e49 
2024-12-16T21:04:40,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742382_1558 (size=8007) 2024-12-16T21:04:40,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742382_1558 (size=8007) 2024-12-16T21:04:40,396 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/namespace/ffdc28263b845c2b16a120a0817fe8a5/.tmp/info/ef5918b7db0d4522a9b8a13bc2d62e49 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/namespace/ffdc28263b845c2b16a120a0817fe8a5/info/ef5918b7db0d4522a9b8a13bc2d62e49 2024-12-16T21:04:40,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742382_1558 (size=8007) 2024-12-16T21:04:40,396 INFO [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/.tmp/rep_barrier/8f0b1491728445169bab2d381256045e 2024-12-16T21:04:40,400 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/namespace/ffdc28263b845c2b16a120a0817fe8a5/info/ef5918b7db0d4522a9b8a13bc2d62e49, entries=2, sequenceid=6, filesize=4.9 K 2024-12-16T21:04:40,400 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for ffdc28263b845c2b16a120a0817fe8a5 in 28ms, sequenceid=6, compaction requested=false 2024-12-16T21:04:40,403 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/namespace/ffdc28263b845c2b16a120a0817fe8a5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T21:04:40,404 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:04:40,404 INFO [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 2024-12-16T21:04:40,404 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for ffdc28263b845c2b16a120a0817fe8a5: 2024-12-16T21:04:40,404 DEBUG [RS_CLOSE_REGION-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734382824291.ffdc28263b845c2b16a120a0817fe8a5. 
2024-12-16T21:04:40,413 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/.tmp/table/5cb57a2c038f45678052a0a45a064c97 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734382935129.af64a4a5600e9932d60c8c31a3595921./table:/1734382952988/DeleteFamily/seqid=0 2024-12-16T21:04:40,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073742383_1559 (size=8861) 2024-12-16T21:04:40,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073742383_1559 (size=8861) 2024-12-16T21:04:40,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073742383_1559 (size=8861) 2024-12-16T21:04:40,418 INFO [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.06 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/.tmp/table/5cb57a2c038f45678052a0a45a064c97 2024-12-16T21:04:40,422 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/.tmp/info/8aab718f19d248948ae2ab42bf1ac176 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/info/8aab718f19d248948ae2ab42bf1ac176 2024-12-16T21:04:40,426 INFO [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/info/8aab718f19d248948ae2ab42bf1ac176, entries=84, sequenceid=202, filesize=15.3 K 2024-12-16T21:04:40,426 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/.tmp/rep_barrier/8f0b1491728445169bab2d381256045e as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/rep_barrier/8f0b1491728445169bab2d381256045e 2024-12-16T21:04:40,430 INFO [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/rep_barrier/8f0b1491728445169bab2d381256045e, entries=21, sequenceid=202, filesize=7.8 K 2024-12-16T21:04:40,430 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/.tmp/table/5cb57a2c038f45678052a0a45a064c97 as hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/table/5cb57a2c038f45678052a0a45a064c97 2024-12-16T21:04:40,434 INFO [RS_CLOSE_META-regionserver/6b259cf15929:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/table/5cb57a2c038f45678052a0a45a064c97, entries=38, sequenceid=202, filesize=8.7 K 2024-12-16T21:04:40,434 INFO [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~68.66 KB/70312, heapSize ~108.95 KB/111568, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=202, compaction requested=false 2024-12-16T21:04:40,438 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/data/hbase/meta/1588230740/recovered.edits/205.seqid, newMaxSeqId=205, maxSeqId=1 2024-12-16T21:04:40,438 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T21:04:40,438 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-16T21:04:40,438 INFO [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-16T21:04:40,438 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-16T21:04:40,438 DEBUG [RS_CLOSE_META-regionserver/6b259cf15929:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-16T21:04:40,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T21:04:40,449 INFO [RS:1;6b259cf15929:36861 {}] regionserver.HRegionServer(1307): Exiting; stopping=6b259cf15929,36861,1734382821216; zookeeper connection closed. 2024-12-16T21:04:40,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36861-0x1000b4e97660002, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T21:04:40,449 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5cd67452 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5cd67452 2024-12-16T21:04:40,534 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1250): stopping server 6b259cf15929,42323,1734382821139; all regions closed. 2024-12-16T21:04:40,534 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1250): stopping server 6b259cf15929,41713,1734382821274; all regions closed. 
2024-12-16T21:04:40,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741836_1012 (size=80694) 2024-12-16T21:04:40,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741835_1011 (size=15402) 2024-12-16T21:04:40,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741836_1012 (size=80694) 2024-12-16T21:04:40,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741835_1011 (size=15402) 2024-12-16T21:04:40,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741835_1011 (size=15402) 2024-12-16T21:04:40,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741836_1012 (size=80694) 2024-12-16T21:04:40,538 DEBUG [RS:2;6b259cf15929:41713 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/oldWALs 2024-12-16T21:04:40,538 INFO [RS:2;6b259cf15929:41713 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6b259cf15929%2C41713%2C1734382821274.meta:.meta(num 1734382824029) 2024-12-16T21:04:40,538 DEBUG [RS:0;6b259cf15929:42323 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/oldWALs 2024-12-16T21:04:40,538 INFO [RS:0;6b259cf15929:42323 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6b259cf15929%2C42323%2C1734382821139:(num 1734382823435) 2024-12-16T21:04:40,538 DEBUG [RS:0;6b259cf15929:42323 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:04:40,538 INFO [RS:0;6b259cf15929:42323 {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T21:04:40,539 INFO [RS:0;6b259cf15929:42323 {}] hbase.ChoreService(370): Chore service for: regionserver/6b259cf15929:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-16T21:04:40,539 INFO [RS:0;6b259cf15929:42323 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-16T21:04:40,539 INFO [RS:0;6b259cf15929:42323 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-16T21:04:40,539 INFO [RS:0;6b259cf15929:42323 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-16T21:04:40,539 INFO [regionserver/6b259cf15929:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-16T21:04:40,539 INFO [RS:0;6b259cf15929:42323 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42323 2024-12-16T21:04:40,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44571 is added to blk_1073741833_1009 (size=14789) 2024-12-16T21:04:40,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741833_1009 (size=14789) 2024-12-16T21:04:40,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741833_1009 (size=14789) 2024-12-16T21:04:40,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6b259cf15929,42323,1734382821139 2024-12-16T21:04:40,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-16T21:04:40,542 DEBUG [RS:2;6b259cf15929:41713 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/oldWALs 2024-12-16T21:04:40,542 INFO [RS:2;6b259cf15929:41713 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6b259cf15929%2C41713%2C1734382821274:(num 1734382823428) 2024-12-16T21:04:40,543 DEBUG [RS:2;6b259cf15929:41713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:04:40,543 INFO [RS:2;6b259cf15929:41713 {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T21:04:40,543 INFO [RS:2;6b259cf15929:41713 {}] hbase.ChoreService(370): Chore service for: regionserver/6b259cf15929:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-16T21:04:40,543 INFO [regionserver/6b259cf15929:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-16T21:04:40,543 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6b259cf15929,42323,1734382821139] 2024-12-16T21:04:40,543 INFO [RS:2;6b259cf15929:41713 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41713 2024-12-16T21:04:40,543 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 6b259cf15929,42323,1734382821139; numProcessing=2 2024-12-16T21:04:40,545 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/6b259cf15929,42323,1734382821139 already deleted, retry=false 2024-12-16T21:04:40,545 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 6b259cf15929,42323,1734382821139 expired; onlineServers=1 2024-12-16T21:04:40,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-16T21:04:40,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6b259cf15929,41713,1734382821274 2024-12-16T21:04:40,548 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6b259cf15929,41713,1734382821274] 2024-12-16T21:04:40,548 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 6b259cf15929,41713,1734382821274; numProcessing=3 2024-12-16T21:04:40,549 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/6b259cf15929,41713,1734382821274 already deleted, retry=false 2024-12-16T21:04:40,549 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 6b259cf15929,41713,1734382821274 expired; onlineServers=0 2024-12-16T21:04:40,549 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6b259cf15929,34343,1734382820356' ***** 2024-12-16T21:04:40,549 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-16T21:04:40,549 DEBUG [M:0;6b259cf15929:34343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69e4f098, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6b259cf15929/172.17.0.2:0 2024-12-16T21:04:40,549 INFO [M:0;6b259cf15929:34343 {}] regionserver.HRegionServer(1224): stopping server 6b259cf15929,34343,1734382820356 2024-12-16T21:04:40,549 INFO [M:0;6b259cf15929:34343 {}] regionserver.HRegionServer(1250): stopping server 6b259cf15929,34343,1734382820356; all regions closed. 2024-12-16T21:04:40,549 DEBUG [M:0;6b259cf15929:34343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T21:04:40,549 DEBUG [M:0;6b259cf15929:34343 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-16T21:04:40,550 DEBUG [M:0;6b259cf15929:34343 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-16T21:04:40,550 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-16T21:04:40,550 DEBUG [master/6b259cf15929:0:becomeActiveMaster-HFileCleaner.small.0-1734382823046 {}] cleaner.HFileCleaner(306): Exit Thread[master/6b259cf15929:0:becomeActiveMaster-HFileCleaner.small.0-1734382823046,5,FailOnTimeoutGroup] 2024-12-16T21:04:40,550 DEBUG [master/6b259cf15929:0:becomeActiveMaster-HFileCleaner.large.0-1734382823045 {}] cleaner.HFileCleaner(306): Exit Thread[master/6b259cf15929:0:becomeActiveMaster-HFileCleaner.large.0-1734382823045,5,FailOnTimeoutGroup] 2024-12-16T21:04:40,550 INFO [M:0;6b259cf15929:34343 {}] hbase.ChoreService(370): Chore service for: master/6b259cf15929:0 had [] on shutdown 2024-12-16T21:04:40,550 DEBUG [M:0;6b259cf15929:34343 {}] master.HMaster(1733): Stopping service threads 2024-12-16T21:04:40,550 INFO [M:0;6b259cf15929:34343 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-16T21:04:40,551 INFO [M:0;6b259cf15929:34343 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-16T21:04:40,551 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-16T21:04:40,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-16T21:04:40,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T21:04:40,551 DEBUG [M:0;6b259cf15929:34343 {}] zookeeper.ZKUtil(347): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-16T21:04:40,551 WARN [M:0;6b259cf15929:34343 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-16T21:04:40,551 INFO [M:0;6b259cf15929:34343 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-16T21:04:40,551 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T21:04:40,551 INFO [M:0;6b259cf15929:34343 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-16T21:04:40,551 DEBUG [M:0;6b259cf15929:34343 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-16T21:04:40,565 INFO [M:0;6b259cf15929:34343 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T21:04:40,565 DEBUG [M:0;6b259cf15929:34343 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T21:04:40,565 DEBUG [M:0;6b259cf15929:34343 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-16T21:04:40,565 DEBUG [M:0;6b259cf15929:34343 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-16T21:04:40,565 INFO [M:0;6b259cf15929:34343 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=805.29 KB heapSize=966.71 KB
2024-12-16T21:04:40,565 ERROR [AsyncFSWAL-0-hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData-prefix:6b259cf15929,34343,1734382820356 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData-prefix:6b259cf15929,34343,1734382820356,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:419) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:132) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:830) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:128) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1148) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.appendAndSync(AsyncFSWAL.java:500) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.consume(AsyncFSWAL.java:603) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T21:04:40,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T21:04:40,643 INFO [RS:0;6b259cf15929:42323 {}] regionserver.HRegionServer(1307): Exiting; stopping=6b259cf15929,42323,1734382821139; zookeeper connection closed.
2024-12-16T21:04:40,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42323-0x1000b4e97660001, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T21:04:40,644 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@edd58a {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@edd58a
2024-12-16T21:04:40,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T21:04:40,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41713-0x1000b4e97660003, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T21:04:40,648 INFO [RS:2;6b259cf15929:41713 {}] regionserver.HRegionServer(1307): Exiting; stopping=6b259cf15929,41713,1734382821274; zookeeper connection closed.
2024-12-16T21:04:40,648 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@56ac91a1 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@56ac91a1
2024-12-16T21:04:40,648 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-12-16T21:04:40,871 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-16T21:04:40,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-16T21:04:40,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-16T21:04:40,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl
2024-12-16T21:04:40,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-16T21:04:40,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver
2024-12-16T21:04:40,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-16T21:04:40,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-16T21:04:40,873 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot
2024-12-16T21:04:45,848 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-16T21:04:49,283 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T21:05:19,284 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T21:05:21,393 DEBUG [master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-16T21:05:21,394 DEBUG [master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-16T21:05:29,588 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;6b259cf15929:34343 224 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 38 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@7a9c1ebf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 13 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@629049e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3264 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 33 Waiting on java.util.concurrent.CountDownLatch$Sync@45e73e49 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12129 Waited count: 12664 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 
(org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@4e22a6cd Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2e9d71e1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 648 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@26ff4d05-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44909}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 3067 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@426ecade Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37263): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 108 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 
Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 31954 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1570 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b4c89b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37263): State: TIMED_WAITING Blocked count: 73 Waited count: 2099 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37263): State: TIMED_WAITING Blocked count: 85 Waited count: 2107 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37263): State: TIMED_WAITING Blocked count: 79 Waited count: 2089 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37263): State: TIMED_WAITING Blocked count: 66 Waited count: 2086 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37263): State: TIMED_WAITING Blocked count: 66 Waited count: 2097 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 162 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp671127358-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp671127358-88-acceptor-0@754170a3-ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:43589}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp671127358-89): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp671127358-90): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-801d092-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3509b2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 645 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37339): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 232 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@623db8d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1243 Waited count: 1340 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3f77ee2e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 328 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 331 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 324 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2042583043-121): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (IPC Client (1891196410) connection to localhost/127.0.0.1:37263 from jenkins): State: TIMED_WAITING Blocked count: 1247 
Waited count: 1248 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp2042583043-122-acceptor-0@2935c2d7-ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:46059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 0 Waited count: 1897 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2042583043-123): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2042583043-124): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-67a2be6d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5731364a): State: TIMED_WAITING Blocked count: 0 Waited count: 645 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44195): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 276 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e6823d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1238 Waited count: 1337 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@61da8f0b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 323 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 323 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 323 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 324 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 324 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1446323025-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446323025-158-acceptor-0@3c5408ed-ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:40415}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1446323025-159): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1446323025-160): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-156b960-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 162 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (ForkJoinPool-2-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 185 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7666605a): State: TIMED_WAITING Blocked count: 0 Waited count: 644 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server idle connection scanner for port 38085): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 200 (java.util.concurrent.ThreadPoolExecutor$Worker@6cb1b31e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@4de44e5a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 1 Waited count: 244 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a55c4ea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1215 Waited count: 1333 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@11e89da1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 192 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 322 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 324 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 324 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 328 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 322 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@d839c15[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:61742): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 162 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 720 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c2cf318 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:61742):): State: WAITING Blocked count: 2 Waited count: 818 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47961e7c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 855 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f227c03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 247 (LeaseRenewer:jenkins@localhost:37263): State: TIMED_WAITING Blocked count: 8 Waited count: 332 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7290f4e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 389 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 23 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:61742)): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 59 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@591cd7fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b66664e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 132 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 
(NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ea4f077 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343): State: WAITING Blocked count: 88 Waited count: 354 Waiting on java.util.concurrent.Semaphore$NonfairSync@8395133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343): State: WAITING Blocked count: 213 Waited count: 797 Waiting on java.util.concurrent.Semaphore$NonfairSync@3725c4ed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343): State: WAITING Blocked count: 122 Waited count: 5044 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6fa98051 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b7f29b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b7f29b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@144591b7 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d5e2d7e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e569f8c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@471c93f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 290 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 72 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;6b259cf15929:34343): State: TIMED_WAITING Blocked count: 6 Waited count: 2597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$954/0x00007fc270eeebe8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@1583aea9): State: TIMED_WAITING Blocked count: 0 Waited count: 107 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3183 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 399 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 105 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited 
count: 32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 31758 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 24 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9f50460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 481 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e1116dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f2c3639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a2aaa9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 531 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 551 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 568 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 31558 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 590 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 163 Waiting on java.util.concurrent.ForkJoinPool@114425d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 591 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 713 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 592 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 577 Waiting on java.util.concurrent.ForkJoinPool@114425d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 613 (region-location-1): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 614 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 615 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1019 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 379 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1108 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1121 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 63 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4cf35a86 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1178 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1532 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@40eaf6a1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2018 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3261 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4953 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4954 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4955 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 7991 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 51 Waiting on java.util.concurrent.ForkJoinPool@114425d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 8934 (AsyncFSWAL-1-hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData-prefix:6b259cf15929,34343,1734382820356): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70b021c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8938 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-16T21:05:49,284 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T21:06:19,284 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;6b259cf15929:34343 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 38 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@7a9c1ebf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 13 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@629049e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3863 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.CountDownLatch$Sync@43927a0c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12129 Waited count: 12665 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@4e22a6cd Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2e9d71e1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 768 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@26ff4d05-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44909}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 3067 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@426ecade Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37263): State: TIMED_WAITING Blocked count: 1 Waited 
count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 37919 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1570 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b4c89b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37263): State: TIMED_WAITING Blocked count: 73 Waited count: 2160 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37263): State: TIMED_WAITING Blocked count: 85 Waited count: 2169 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37263): State: TIMED_WAITING Blocked count: 79 Waited count: 2150 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37263): State: TIMED_WAITING Blocked count: 66 Waited count: 2148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37263): State: TIMED_WAITING Blocked count: 66 Waited count: 2158 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp671127358-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp671127358-88-acceptor-0@754170a3-ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:43589}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp671127358-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp671127358-90): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-801d092-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3509b2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 765 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37339): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 252 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@623db8d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1263 Waited count: 1380 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3f77ee2e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 388 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2042583043-121): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (IPC Client (1891196410) connection to localhost/127.0.0.1:37263 from jenkins): State: TIMED_WAITING Blocked count: 1307 Waited count: 1308 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp2042583043-122-acceptor-0@2935c2d7-ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:46059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 0 Waited count: 1957 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2042583043-123): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2042583043-124): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-67a2be6d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5731364a): State: TIMED_WAITING Blocked count: 0 Waited count: 765 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44195): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 296 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e6823d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1258 Waited count: 1377 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@61da8f0b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1446323025-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446323025-158-acceptor-0@3c5408ed-ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:40415}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1446323025-159): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1446323025-160): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-156b960-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 
(org.apache.hadoop.util.JvmPauseMonitor$Monitor@7666605a): State: TIMED_WAITING Blocked count: 0 Waited count: 764 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server idle connection scanner for port 38085): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 200 (java.util.concurrent.ThreadPoolExecutor$Worker@6cb1b31e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@4de44e5a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 1 Waited count: 264 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a55c4ea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1235 Waited count: 1373 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@11e89da1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 192 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 382 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 386 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 388 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 382 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited 
count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@d839c15[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:61742): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 725 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c2cf318 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:61742):): State: WAITING Blocked count: 2 Waited count: 823 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47961e7c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 860 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f227c03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7290f4e9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 23 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:61742)): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 
(Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 59 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@591cd7fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b66664e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 133 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ea4f077 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343): State: WAITING Blocked count: 88 Waited count: 354 Waiting on java.util.concurrent.Semaphore$NonfairSync@8395133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343): State: WAITING Blocked count: 213 Waited count: 797 Waiting on java.util.concurrent.Semaphore$NonfairSync@3725c4ed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343): State: WAITING Blocked count: 122 Waited count: 5044 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6fa98051 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b7f29b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b7f29b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@144591b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d5e2d7e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e569f8c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@471c93f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 290 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 72 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;6b259cf15929:34343): State: TIMED_WAITING Blocked count: 6 Waited count: 2597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$954/0x00007fc270eeebe8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@1583aea9): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3783 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 399 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 105 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a35632d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 37760 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 24 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9f50460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 481 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e1116dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f2c3639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a2aaa9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 531 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 551 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 568 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 37560 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 590 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 163 Waiting on java.util.concurrent.ForkJoinPool@114425d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 592 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 577 Waiting on java.util.concurrent.ForkJoinPool@114425d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 613 (region-location-1): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 614 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 615 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1019 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 385 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1108 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1121 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 63 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4cf35a86 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1178 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1532 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@40eaf6a1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2018 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3261 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4953 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4954 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4955 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 7991 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 8934 (AsyncFSWAL-1-hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData-prefix:6b259cf15929,34343,1734382820356): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70b021c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8938 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-16T21:06:49,284 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T21:07:19,284 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;6b259cf15929:34343 220 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 38 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@7a9c1ebf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 13 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@629049e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 45 Waiting on java.util.concurrent.CountDownLatch$Sync@545ad321 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12129 Waited count: 12666 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@4e22a6cd Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2e9d71e1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 888 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@26ff4d05-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44909}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 3067 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@426ecade Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37263): State: TIMED_WAITING Blocked count: 1 Waited 
count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 43883 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1570 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b4c89b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37263): State: TIMED_WAITING Blocked count: 73 Waited count: 2221 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37263): State: TIMED_WAITING Blocked count: 85 Waited count: 2230 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37263): State: TIMED_WAITING Blocked count: 79 Waited count: 2212 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37263): State: TIMED_WAITING Blocked count: 66 Waited count: 2209 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37263): State: TIMED_WAITING Blocked count: 66 Waited count: 2219 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 222 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp671127358-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp671127358-88-acceptor-0@754170a3-ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:43589}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp671127358-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp671127358-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-801d092-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3509b2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 885 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37339): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 272 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@623db8d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1283 Waited count: 1420 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3f77ee2e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 457 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2042583043-121): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (IPC Client (1891196410) connection to localhost/127.0.0.1:37263 from jenkins): State: TIMED_WAITING Blocked count: 1367 Waited count: 1368 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp2042583043-122-acceptor-0@2935c2d7-ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:46059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 0 Waited count: 2017 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2042583043-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2042583043-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-67a2be6d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5731364a): State: TIMED_WAITING Blocked count: 0 Waited count: 885 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44195): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 316 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e6823d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1278 Waited count: 1417 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@61da8f0b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1446323025-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446323025-158-acceptor-0@3c5408ed-ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:40415}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1446323025-159): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1446323025-160): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-156b960-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 
(org.apache.hadoop.util.JvmPauseMonitor$Monitor@7666605a): State: TIMED_WAITING Blocked count: 0 Waited count: 884 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server idle connection scanner for port 38085): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 200 (java.util.concurrent.ThreadPoolExecutor$Worker@6cb1b31e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@4de44e5a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 1 Waited count: 284 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a55c4ea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1255 Waited count: 1413 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@11e89da1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 192 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 442 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 448 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 442 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited 
count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@d839c15[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:61742): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 222 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 729 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c2cf318 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:61742):): State: WAITING Blocked count: 2 Waited count: 827 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47961e7c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 864 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f227c03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7290f4e9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 23 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:61742)): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 
(Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 59 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@591cd7fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b66664e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 133 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ea4f077 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343): State: WAITING Blocked count: 88 Waited count: 354 Waiting on java.util.concurrent.Semaphore$NonfairSync@8395133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343): State: WAITING Blocked count: 213 Waited count: 797 Waiting on java.util.concurrent.Semaphore$NonfairSync@3725c4ed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343): State: WAITING Blocked count: 122 Waited count: 5044 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6fa98051 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b7f29b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b7f29b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@144591b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d5e2d7e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e569f8c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@471c93f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 290 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 72 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;6b259cf15929:34343): State: TIMED_WAITING Blocked count: 6 Waited count: 2597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$954/0x00007fc270eeebe8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@1583aea9): State: TIMED_WAITING Blocked count: 0 Waited count: 147 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4382 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 399 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 105 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a35632d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 43761 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 24 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9f50460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 481 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e1116dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f2c3639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a2aaa9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 531 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 551 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 568 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 43562 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 590 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 164 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 592 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 577 Waiting on java.util.concurrent.ForkJoinPool@114425d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 613 (region-location-1): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 614 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 615 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1019 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1108 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1121 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 63 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4cf35a86 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1178 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1532 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@40eaf6a1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2018 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3261 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4953 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4954 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4955 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8934 (AsyncFSWAL-1-hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData-prefix:6b259cf15929,34343,1734382820356): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70b021c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8938 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-16T21:07:49,285 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T21:08:19,285 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;6b259cf15929:34343 219 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 38 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@7a9c1ebf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 13 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@629049e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5063 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 51 Waiting on java.util.concurrent.CountDownLatch$Sync@25951e0a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12129 Waited count: 12667 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@4e22a6cd Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2e9d71e1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 1008 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@26ff4d05-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44909}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 3067 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@426ecade Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37263): State: TIMED_WAITING Blocked count: 1 Waited 
count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 168 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 49847 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1570 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b4c89b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37263): State: TIMED_WAITING Blocked count: 73 Waited count: 2283 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37263): State: TIMED_WAITING Blocked count: 85 Waited count: 2291 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37263): State: TIMED_WAITING Blocked count: 79 Waited count: 2273 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37263): State: TIMED_WAITING Blocked count: 66 Waited count: 2270 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37263): State: TIMED_WAITING Blocked count: 66 Waited count: 2281 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 252 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp671127358-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp671127358-88-acceptor-0@754170a3-ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:43589}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp671127358-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp671127358-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-801d092-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3509b2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 1005 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37339): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 292 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@623db8d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1303 Waited count: 1460 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3f77ee2e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 528 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2042583043-121): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (IPC Client (1891196410) connection to localhost/127.0.0.1:37263 from jenkins): State: TIMED_WAITING Blocked count: 1427 Waited count: 1428 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp2042583043-122-acceptor-0@2935c2d7-ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:46059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 0 Waited count: 2077 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2042583043-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2042583043-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-67a2be6d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5731364a): State: TIMED_WAITING Blocked count: 0 Waited count: 1005 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44195): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 336 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e6823d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1298 Waited count: 1457 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@61da8f0b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1446323025-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446323025-158-acceptor-0@3c5408ed-ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:40415}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1446323025-159): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1446323025-160): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-156b960-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 
(org.apache.hadoop.util.JvmPauseMonitor$Monitor@7666605a): State: TIMED_WAITING Blocked count: 0 Waited count: 1004 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server idle connection scanner for port 38085): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 200 (java.util.concurrent.ThreadPoolExecutor$Worker@6cb1b31e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@4de44e5a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 1 Waited count: 304 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a55c4ea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1275 Waited count: 1453 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@11e89da1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 192 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 502 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 508 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 508 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 502 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited 
count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@d839c15[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:61742): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 252 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 734 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c2cf318 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:61742):): State: WAITING Blocked count: 2 Waited count: 832 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47961e7c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 869 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f227c03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7290f4e9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 23 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:61742)): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 
(Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 59 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@591cd7fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b66664e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 134 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ea4f077 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343): State: WAITING Blocked count: 88 Waited count: 354 Waiting on java.util.concurrent.Semaphore$NonfairSync@8395133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343): State: WAITING Blocked count: 213 Waited count: 797 Waiting on java.util.concurrent.Semaphore$NonfairSync@3725c4ed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343): State: WAITING Blocked count: 122 Waited count: 5044 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6fa98051 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b7f29b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b7f29b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@144591b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d5e2d7e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e569f8c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@471c93f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 290 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 72 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;6b259cf15929:34343): State: TIMED_WAITING Blocked count: 6 Waited count: 2597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$954/0x00007fc270eeebe8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@1583aea9): State: TIMED_WAITING Blocked count: 0 Waited count: 167 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4982 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 399 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 105 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a35632d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49763 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 24 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9f50460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 481 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e1116dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f2c3639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a2aaa9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 531 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 551 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 568 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49563 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 592 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 613 (region-location-1): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 614 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 615 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1019 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 397 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1108 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1121 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 63 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4cf35a86 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1178 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RS-EventLoopGroup-4-3): 
State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1532 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@40eaf6a1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2018 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3261 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4953 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4954 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4955 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8934 (AsyncFSWAL-1-hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData-prefix:6b259cf15929,34343,1734382820356): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70b021c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8938 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-16T21:08:49,285 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T21:09:19,285 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T21:09:40,566 DEBUG [M:0;6b259cf15929:34343 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T21:09:40,566 WARN [M:0;6b259cf15929:34343 {}] region.MasterRegion(134): Failed to close region
org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck?
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:883) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) ~[classes/:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck?
	at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) ~[classes/:?]
	... 20 more
2024-12-16T21:09:40,567 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(163): normal close failed, try recover
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:396) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:243) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:201) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:236) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:160) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T21:09:40,569 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-16T21:09:40,569 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-16T21:09:40,569 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/WALs/6b259cf15929,34343,1734382820356/6b259cf15929%2C34343%2C1734382820356.1734382822217
2024-12-16T21:09:40,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/WALs/6b259cf15929,34343,1734382820356/6b259cf15929%2C34343%2C1734382820356.1734382822217 after 1ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T21:09:40,570 WARN [Close-WAL-Writer-0 {}] wal.AsyncFSWAL(734): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T21:09:40,570 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/WALs/6b259cf15929,34343,1734382820356/6b259cf15929%2C34343%2C1734382820356.1734382822217
2024-12-16T21:09:40,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/WALs/6b259cf15929,34343,1734382820356/6b259cf15929%2C34343%2C1734382820356.1734382822217 after 0ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;6b259cf15929:34343 220 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 38 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@7a9c1ebf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 13 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@629049e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5663 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 57 Waiting on java.util.concurrent.CountDownLatch$Sync@29d09631 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12129 Waited count: 12668 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@4e22a6cd Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2e9d71e1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 1128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@26ff4d05-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44909}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 3067 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@426ecade Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37263): State: TIMED_WAITING Blocked count: 1 Waited 
count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 188 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 189 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 55811 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1570 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b4c89b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37263): State: TIMED_WAITING Blocked count: 73 Waited count: 2344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37263): State: TIMED_WAITING Blocked count: 85 Waited count: 2353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37263): State: TIMED_WAITING Blocked count: 79 Waited count: 2334 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37263): State: TIMED_WAITING Blocked count: 66 Waited count: 2331 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37263): State: TIMED_WAITING Blocked count: 66 Waited count: 2342 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 282 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp671127358-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp671127358-88-acceptor-0@754170a3-ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:43589}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp671127358-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp671127358-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-801d092-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3509b2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 1125 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37339): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 312 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@623db8d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1323 Waited count: 1500 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3f77ee2e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 592 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 592 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 590 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37339): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2042583043-121): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (IPC Client (1891196410) connection to localhost/127.0.0.1:37263 from jenkins): State: TIMED_WAITING Blocked count: 1487 Waited count: 1488 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp2042583043-122-acceptor-0@2935c2d7-ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:46059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 0 Waited count: 2137 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2042583043-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2042583043-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-67a2be6d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5731364a): State: TIMED_WAITING Blocked count: 0 Waited count: 1125 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44195): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 356 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e6823d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1318 Waited count: 1497 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@61da8f0b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 44195): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1446323025-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007fc270428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1446323025-158-acceptor-0@3c5408ed-ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:40415}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1446323025-159): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1446323025-160): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-156b960-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 
(org.apache.hadoop.util.JvmPauseMonitor$Monitor@7666605a): State: TIMED_WAITING Blocked count: 0 Waited count: 1124 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (IPC Server idle connection scanner for port 38085): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 200 (java.util.concurrent.ThreadPoolExecutor$Worker@6cb1b31e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@4de44e5a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 1 Waited count: 324 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a55c4ea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263): State: TIMED_WAITING Blocked count: 1295 Waited count: 1493 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@11e89da1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 192 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 562 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 571 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 571 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 38085): State: TIMED_WAITING Blocked count: 0 Waited count: 562 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited 
count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6/current/BP-1970874171-172.17.0.2-1734382816445): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@d839c15[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:61742): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 282 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 738 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c2cf318 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:61742):): State: WAITING Blocked count: 2 Waited count: 836 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47961e7c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 873 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f227c03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7290f4e9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 616 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 23 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:61742)): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 
(Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 59 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@591cd7fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b66664e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 135 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@491f7ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ea4f077 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34343): State: WAITING Blocked count: 88 Waited count: 354 Waiting on java.util.concurrent.Semaphore$NonfairSync@8395133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34343): State: WAITING Blocked count: 213 Waited count: 797 Waiting on java.util.concurrent.Semaphore$NonfairSync@3725c4ed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34343): State: WAITING Blocked count: 122 Waited count: 5044 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6fa98051 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b7f29b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b7f29b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@144591b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d5e2d7e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e569f8c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34343): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@471c93f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 290 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 72 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;6b259cf15929:34343): State: TIMED_WAITING Blocked count: 6 Waited count: 2598 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1011) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown(AbstractFSWALProvider.java:184) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:272) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/6b259cf15929:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@1583aea9): State: TIMED_WAITING Blocked count: 0 Waited count: 187 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5582 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 399 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 105 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a35632d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55764 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 
(RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 24 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9f50460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 481 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e1116dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f2c3639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/6b259cf15929:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a2aaa9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 531 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 551 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 556 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 568 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55564 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 613 (region-location-1): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 614 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 615 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1019 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 403 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1108 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1121 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 63 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4cf35a86 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1178 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1532 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@40eaf6a1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2018 
(RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3261 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27556819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4953 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4954 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4955 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8934 (AsyncFSWAL-1-hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData-prefix:6b259cf15929,34343,1734382820356): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70b021c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8938 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8939 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doShutdown(AsyncFSWAL.java:793) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:995) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:990) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8940 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL$$Lambda$1126/0x00007fc27111ba90.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-16T21:09:44,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/WALs/6b259cf15929,34343,1734382820356/6b259cf15929%2C34343%2C1734382820356.1734382822217 after 4001ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T21:09:45,567 ERROR [WAL-Shutdown-0 {}] wal.AsyncFSWAL(794): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-12-16T21:09:45,568 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-16T21:09:45,568 INFO [M:0;6b259cf15929:34343 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-16T21:09:45,568 INFO [M:0;6b259cf15929:34343 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34343
2024-12-16T21:09:45,570 DEBUG [M:0;6b259cf15929:34343 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/6b259cf15929,34343,1734382820356 already deleted, retry=false
2024-12-16T21:09:45,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37263/user/jenkins/test-data/5991c7e1-0994-a917-b23d-f8ca199049ba/MasterData/WALs/6b259cf15929,34343,1734382820356/6b259cf15929%2C34343%2C1734382820356.1734382822217
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 12 more
2024-12-16T21:09:45,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T21:09:45,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34343-0x1000b4e97660000, quorum=127.0.0.1:61742, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T21:09:45,671 INFO [M:0;6b259cf15929:34343 {}] regionserver.HRegionServer(1307): Exiting; stopping=6b259cf15929,34343,1734382820356; zookeeper connection closed.
2024-12-16T21:09:45,675 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6bdef31c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T21:09:45,675 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2cc41f37{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T21:09:45,675 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T21:09:45,675 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f4d5ab4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T21:09:45,675 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e1b48b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,STOPPED}
2024-12-16T21:09:45,677 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T21:09:45,677 WARN [BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T21:09:45,677 WARN [BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1970874171-172.17.0.2-1734382816445 (Datanode Uuid 25a9c4e7-cb6f-40b7-aff9-f0b9825d31c0) service to localhost/127.0.0.1:37263
2024-12-16T21:09:45,677 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T21:09:45,678 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data5/current/BP-1970874171-172.17.0.2-1734382816445 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T21:09:45,678 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data6/current/BP-1970874171-172.17.0.2-1734382816445 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T21:09:45,679 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T21:09:45,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14090edc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T21:09:45,681 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5be8101a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T21:09:45,681 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T21:09:45,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a110049{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T21:09:45,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45f72ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,STOPPED}
2024-12-16T21:09:45,682 WARN [BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T21:09:45,682 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T21:09:45,682 WARN [BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1970874171-172.17.0.2-1734382816445 (Datanode Uuid a092c635-3b62-48e5-b8a3-2cb297fa0a07) service to localhost/127.0.0.1:37263
2024-12-16T21:09:45,682 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T21:09:45,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data3/current/BP-1970874171-172.17.0.2-1734382816445 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T21:09:45,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data4/current/BP-1970874171-172.17.0.2-1734382816445 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T21:09:45,683 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T21:09:45,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a633356{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T21:09:45,685 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39bccd2b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T21:09:45,685 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T21:09:45,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d5648e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T21:09:45,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7622634b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,STOPPED}
2024-12-16T21:09:45,686 WARN [BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T21:09:45,686 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T21:09:45,686 WARN [BP-1970874171-172.17.0.2-1734382816445 heartbeating to localhost/127.0.0.1:37263 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1970874171-172.17.0.2-1734382816445 (Datanode Uuid ea823c6c-168e-4f2e-9a2f-daa6b4a4e26f) service to localhost/127.0.0.1:37263
2024-12-16T21:09:45,686 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T21:09:45,687 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data1/current/BP-1970874171-172.17.0.2-1734382816445 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T21:09:45,687 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/cluster_ffb52b47-4a32-12c3-2072-3bb5be5f9c72/dfs/data/data2/current/BP-1970874171-172.17.0.2-1734382816445 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T21:09:45,687 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T21:09:45,693 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7883a2cb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-16T21:09:45,694 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T21:09:45,694 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T21:09:45,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@343317a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T21:09:45,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a82d853{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/cb442fa7-60e7-dd82-8c55-6f73e0dfbd17/hadoop.log.dir/,STOPPED}
2024-12-16T21:09:45,705 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-16T21:09:45,955 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down