2024-12-13 07:32:54,603 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-13 07:32:54,621 main DEBUG Took 0.015487 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-13 07:32:54,621 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-13 07:32:54,622 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-13 07:32:54,623 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-13 07:32:54,624 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,633 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-13 07:32:54,650 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,652 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,653 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,653 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,654 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,654 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,656 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,656 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,657 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,657 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,658 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,659 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,660 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,660 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-13 07:32:54,663 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,664 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,664 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,665 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,665 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,666 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,667 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,670 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,672 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,672 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 07:32:54,673 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,673 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-13 07:32:54,676 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 07:32:54,678 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-13 07:32:54,681 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-13 07:32:54,682 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-13 07:32:54,696 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-13 07:32:54,697 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-13 07:32:54,728 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-13 07:32:54,732 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-13 07:32:54,734 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-13 07:32:54,735 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-13 07:32:54,735 main DEBUG createAppenders(={Console}) 2024-12-13 07:32:54,737 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 initialized 2024-12-13 07:32:54,737 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-13 07:32:54,738 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 OK. 2024-12-13 07:32:54,739 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-13 07:32:54,739 main DEBUG OutputStream closed 2024-12-13 07:32:54,740 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-13 07:32:54,740 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-13 07:32:54,740 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@57cf54e1 OK 2024-12-13 07:32:54,879 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-13 07:32:54,882 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-13 07:32:54,884 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-13 07:32:54,885 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-13 07:32:54,905 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-13 07:32:54,906 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-13 07:32:54,907 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-13 07:32:54,913 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-13 07:32:54,913 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-13 07:32:54,914 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-13 07:32:54,914 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-13 07:32:54,914 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-13 07:32:54,915 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-13 07:32:54,916 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-13 07:32:54,916 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-13 07:32:54,917 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-13 07:32:54,918 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-13 07:32:54,919 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-13 07:32:54,924 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-13 07:32:54,925 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@29ca3d04) with optional ClassLoader: null 2024-12-13 07:32:54,925 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-13 07:32:54,927 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@29ca3d04] started OK. 2024-12-13T07:32:54,957 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-13 07:32:54,961 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-13 07:32:54,962 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-13T07:32:55,366 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61 2024-12-13T07:32:55,367 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-12-13T07:32:55,410 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-13T07:32:55,671 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-13T07:32:55,672 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725, deleteOnExit=true 2024-12-13T07:32:55,672 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-13T07:32:55,673 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/test.cache.data in system properties and HBase conf 2024-12-13T07:32:55,675 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.tmp.dir in system properties and HBase conf 2024-12-13T07:32:55,676 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir in system properties and HBase conf 2024-12-13T07:32:55,677 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-13T07:32:55,677 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-13T07:32:55,678 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-13T07:32:55,794 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-13T07:32:55,804 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-13T07:32:55,807 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-13T07:32:55,808 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-13T07:32:55,809 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T07:32:55,812 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-13T07:32:55,812 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-13T07:32:55,813 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T07:32:55,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T07:32:55,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-13T07:32:55,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/nfs.dump.dir in system properties and HBase conf 2024-12-13T07:32:55,816 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/java.io.tmpdir in system properties and HBase conf 2024-12-13T07:32:55,816 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T07:32:55,817 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-13T07:32:55,817 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-13T07:32:57,184 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-13T07:32:57,302 INFO [Time-limited test {}] log.Log(170): Logging initialized @3788ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-13T07:32:57,413 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:32:57,546 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T07:32:57,626 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T07:32:57,626 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T07:32:57,630 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T07:32:57,654 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:32:57,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f8ccbbc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,AVAILABLE} 2024-12-13T07:32:57,660 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@432ebcaa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T07:32:57,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59f3fe3e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/java.io.tmpdir/jetty-localhost-44519-hadoop-hdfs-3_4_1-tests_jar-_-any-3795763174136283965/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-13T07:32:57,918 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1bde3171{HTTP/1.1, (http/1.1)}{localhost:44519} 2024-12-13T07:32:57,919 INFO [Time-limited test {}] server.Server(415): Started @4406ms 2024-12-13T07:32:58,512 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:32:58,519 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T07:32:58,520 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T07:32:58,520 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T07:32:58,521 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T07:32:58,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ffbec59{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,AVAILABLE} 2024-12-13T07:32:58,523 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1815b75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T07:32:58,633 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d992105{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/java.io.tmpdir/jetty-localhost-36637-hadoop-hdfs-3_4_1-tests_jar-_-any-3132930638144737152/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T07:32:58,634 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@440e205c{HTTP/1.1, (http/1.1)}{localhost:36637} 2024-12-13T07:32:58,634 INFO [Time-limited test {}] server.Server(415): Started @5122ms 2024-12-13T07:32:58,695 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T07:32:58,893 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:32:58,904 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T07:32:58,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T07:32:58,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T07:32:58,908 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T07:32:58,911 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7467d7f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,AVAILABLE} 2024-12-13T07:32:58,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4208f97a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T07:32:59,041 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d2dc9a9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/java.io.tmpdir/jetty-localhost-33899-hadoop-hdfs-3_4_1-tests_jar-_-any-8809160131134625717/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T07:32:59,042 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@42d696f2{HTTP/1.1, (http/1.1)}{localhost:33899} 2024-12-13T07:32:59,042 INFO [Time-limited test {}] server.Server(415): Started @5530ms 2024-12-13T07:32:59,047 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T07:32:59,117 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:32:59,122 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T07:32:59,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T07:32:59,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T07:32:59,125 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T07:32:59,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57eb71ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,AVAILABLE} 2024-12-13T07:32:59,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e0ba457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T07:32:59,237 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@721cee68{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/java.io.tmpdir/jetty-localhost-36975-hadoop-hdfs-3_4_1-tests_jar-_-any-6636968146412007008/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T07:32:59,238 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a125539{HTTP/1.1, (http/1.1)}{localhost:36975} 2024-12-13T07:32:59,238 INFO [Time-limited test {}] server.Server(415): Started @5726ms 2024-12-13T07:32:59,240 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
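
[Editorial sketch, not part of the captured output.] The Jetty/DFS endpoints above are brought up by HBaseTestingUtility for the StartMiniClusterOption logged earlier (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1, createRootDir=false, createWALDir=false). A minimal sketch of a test that requests the same topology might look roughly like the following; it assumes the HBase 2.x testing API (HBaseTestingUtility, StartMiniClusterOption.builder()), and the class name is hypothetical.

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Assumed HBase 2.x test API; option values mirror the
    // StartMiniClusterOption printed in the log above.
    HBaseTestingUtility util = new HBaseTestingUtility();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // numMasters=1
        .numRegionServers(3)  // numRegionServers=3
        .numDataNodes(3)      // numDataNodes=3
        .numZkServers(1)      // numZkServers=1
        .createRootDir(false) // createRootDir=false
        .createWALDir(false)  // createWALDir=false
        .build();
    util.startMiniCluster(option); // starts DFS, ZooKeeper, master and region servers
    try {
      // ... test logic would run against util.getConnection() here ...
    } finally {
      util.shutdownMiniCluster(); // tears the whole mini cluster down
    }
  }
}
```

In real HBase tests this call typically lives in @BeforeClass/@AfterClass setup and teardown rather than a main method; the main method here only keeps the sketch self-contained.
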
2024-12-13T07:33:00,413 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1/current/BP-130630477-172.17.0.2-1734075176645/current, will proceed with Du for space computation calculation, 2024-12-13T07:33:00,413 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4/current/BP-130630477-172.17.0.2-1734075176645/current, will proceed with Du for space computation calculation, 2024-12-13T07:33:00,413 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2/current/BP-130630477-172.17.0.2-1734075176645/current, will proceed with Du for space computation calculation, 2024-12-13T07:33:00,413 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3/current/BP-130630477-172.17.0.2-1734075176645/current, will proceed with Du for space computation calculation, 2024-12-13T07:33:00,447 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-13T07:33:00,450 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T07:33:00,491 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9569d2481671be21 with lease ID 0x23615d7fb4b8dcf0: Processing first storage report for DS-3366fd89-6dd0-4619-a690-b626ebfdf962 from datanode DatanodeRegistration(127.0.0.1:33299, datanodeUuid=dd92cc55-e12e-44ba-9ba8-99aa86645a78, infoPort=33377, infoSecurePort=0, ipcPort=38381, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645) 2024-12-13T07:33:00,492 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9569d2481671be21 with lease ID 0x23615d7fb4b8dcf0: from storage DS-3366fd89-6dd0-4619-a690-b626ebfdf962 node DatanodeRegistration(127.0.0.1:33299, datanodeUuid=dd92cc55-e12e-44ba-9ba8-99aa86645a78, infoPort=33377, infoSecurePort=0, ipcPort=38381, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-13T07:33:00,493 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x815f2bef41dd957f with lease ID 0x23615d7fb4b8dcf1: Processing first storage report for DS-addf9c7a-b733-4006-9870-a44eb3beb221 from datanode DatanodeRegistration(127.0.0.1:40823, datanodeUuid=71923675-b3b8-4b71-9f3a-008f5bf139fb, infoPort=33473, infoSecurePort=0, ipcPort=38399, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645) 2024-12-13T07:33:00,493 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x815f2bef41dd957f with lease ID 0x23615d7fb4b8dcf1: from storage DS-addf9c7a-b733-4006-9870-a44eb3beb221 node DatanodeRegistration(127.0.0.1:40823, datanodeUuid=71923675-b3b8-4b71-9f3a-008f5bf139fb, infoPort=33473, infoSecurePort=0, ipcPort=38399, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-13T07:33:00,494 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9569d2481671be21 with lease ID 0x23615d7fb4b8dcf0: Processing first storage report for DS-b769a0e9-2b87-4772-938e-5f797c979736 from datanode DatanodeRegistration(127.0.0.1:33299, datanodeUuid=dd92cc55-e12e-44ba-9ba8-99aa86645a78, infoPort=33377, infoSecurePort=0, ipcPort=38381, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645) 2024-12-13T07:33:00,494 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9569d2481671be21 with lease ID 0x23615d7fb4b8dcf0: from storage DS-b769a0e9-2b87-4772-938e-5f797c979736 node DatanodeRegistration(127.0.0.1:33299, datanodeUuid=dd92cc55-e12e-44ba-9ba8-99aa86645a78, infoPort=33377, infoSecurePort=0, ipcPort=38381, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T07:33:00,494 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x815f2bef41dd957f with lease ID 0x23615d7fb4b8dcf1: Processing first storage report for DS-8a09b0b9-ad94-40cd-b1df-ec61ac2c8868 from datanode DatanodeRegistration(127.0.0.1:40823, datanodeUuid=71923675-b3b8-4b71-9f3a-008f5bf139fb, infoPort=33473, infoSecurePort=0, ipcPort=38399, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645) 2024-12-13T07:33:00,494 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x815f2bef41dd957f with lease ID 0x23615d7fb4b8dcf1: from storage DS-8a09b0b9-ad94-40cd-b1df-ec61ac2c8868 node DatanodeRegistration(127.0.0.1:40823, datanodeUuid=71923675-b3b8-4b71-9f3a-008f5bf139fb, infoPort=33473, infoSecurePort=0, ipcPort=38399, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T07:33:00,545 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5/current/BP-130630477-172.17.0.2-1734075176645/current, will proceed with Du for space computation calculation, 2024-12-13T07:33:00,545 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6/current/BP-130630477-172.17.0.2-1734075176645/current, will proceed with Du for space computation calculation, 2024-12-13T07:33:00,573 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-13T07:33:00,577 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a34e1427d5ee87 with lease ID 0x23615d7fb4b8dcf2: Processing first storage report for DS-76d4a548-41e1-4f97-8915-a3f81cb2319e from datanode DatanodeRegistration(127.0.0.1:34409, datanodeUuid=c2e5d365-f7c3-4c18-8e6d-2355967b2d88, infoPort=35827, infoSecurePort=0, ipcPort=41463, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645) 2024-12-13T07:33:00,577 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a34e1427d5ee87 with lease ID 0x23615d7fb4b8dcf2: from storage DS-76d4a548-41e1-4f97-8915-a3f81cb2319e node DatanodeRegistration(127.0.0.1:34409, datanodeUuid=c2e5d365-f7c3-4c18-8e6d-2355967b2d88, infoPort=35827, infoSecurePort=0, ipcPort=41463, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T07:33:00,578 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a34e1427d5ee87 with lease ID 0x23615d7fb4b8dcf2: Processing first storage report for DS-3d5757da-f232-4b72-811e-1bca432273ed from datanode DatanodeRegistration(127.0.0.1:34409, datanodeUuid=c2e5d365-f7c3-4c18-8e6d-2355967b2d88, infoPort=35827, infoSecurePort=0, ipcPort=41463, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645) 2024-12-13T07:33:00,578 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a34e1427d5ee87 with lease ID 0x23615d7fb4b8dcf2: from storage DS-3d5757da-f232-4b72-811e-1bca432273ed node DatanodeRegistration(127.0.0.1:34409, datanodeUuid=c2e5d365-f7c3-4c18-8e6d-2355967b2d88, infoPort=35827, infoSecurePort=0, ipcPort=41463, storageInfo=lv=-57;cid=testClusterID;nsid=333704626;c=1734075176645), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T07:33:00,634 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61 2024-12-13T07:33:00,762 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/zookeeper_0, clientPort=59810, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-13T07:33:00,777 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=59810 2024-12-13T07:33:00,792 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T07:33:00,795 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T07:33:01,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741825_1001 (size=7) 2024-12-13T07:33:01,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741825_1001 (size=7) 2024-12-13T07:33:01,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741825_1001 (size=7) 2024-12-13T07:33:01,479 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 with version=8 2024-12-13T07:33:01,480 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/hbase-staging 2024-12-13T07:33:01,588 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-13T07:33:01,808 INFO [Time-limited test {}] client.ConnectionUtils(129): master/9eb75de94ef3:0 server-side Connection retries=45 2024-12-13T07:33:01,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:01,827 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:01,827 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T07:33:01,828 INFO [Time-limited test {}] ipc.RpcExecutor(188): 
Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:01,828 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T07:33:01,969 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T07:33:02,025 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-13T07:33:02,034 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-13T07:33:02,037 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T07:33:02,060 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 49817 (auto-detected) 2024-12-13T07:33:02,061 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-13T07:33:02,080 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33713 2024-12-13T07:33:02,089 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T07:33:02,091 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T07:33:02,103 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33713 connecting to ZooKeeper ensemble=127.0.0.1:59810 2024-12-13T07:33:02,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:337130x0, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T07:33:02,180 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33713-0x1001e4e83250000 connected 2024-12-13T07:33:02,251 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T07:33:02,255 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T07:33:02,269 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T07:33:02,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33713 2024-12-13T07:33:02,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33713 2024-12-13T07:33:02,277 DEBUG [Time-limited test 
{}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33713 2024-12-13T07:33:02,278 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33713 2024-12-13T07:33:02,278 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33713 2024-12-13T07:33:02,289 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534, hbase.cluster.distributed=false 2024-12-13T07:33:02,343 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/9eb75de94ef3:0 server-side Connection retries=45 2024-12-13T07:33:02,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:02,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:02,343 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T07:33:02,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:02,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T07:33:02,345 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T07:33:02,348 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T07:33:02,357 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36021 2024-12-13T07:33:02,359 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-13T07:33:02,367 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-13T07:33:02,369 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T07:33:02,374 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T07:33:02,382 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36021 connecting to ZooKeeper ensemble=127.0.0.1:59810 2024-12-13T07:33:02,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360210x0, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T07:33:02,400 
DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T07:33:02,400 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36021-0x1001e4e83250001 connected 2024-12-13T07:33:02,401 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T07:33:02,402 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T07:33:02,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36021 2024-12-13T07:33:02,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36021 2024-12-13T07:33:02,410 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36021 2024-12-13T07:33:02,410 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36021 2024-12-13T07:33:02,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36021 2024-12-13T07:33:02,431 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/9eb75de94ef3:0 server-side Connection retries=45 2024-12-13T07:33:02,432 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:02,432 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:02,433 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T07:33:02,433 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:02,433 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T07:33:02,434 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T07:33:02,434 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T07:33:02,438 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41895 2024-12-13T07:33:02,438 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-13T07:33:02,442 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled 
with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-13T07:33:02,444 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T07:33:02,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T07:33:02,460 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41895 connecting to ZooKeeper ensemble=127.0.0.1:59810 2024-12-13T07:33:02,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:418950x0, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T07:33:02,474 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:418950x0, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T07:33:02,474 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41895-0x1001e4e83250002 connected 2024-12-13T07:33:02,476 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T07:33:02,477 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T07:33:02,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41895 2024-12-13T07:33:02,495 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41895 2024-12-13T07:33:02,499 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41895 2024-12-13T07:33:02,502 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41895 2024-12-13T07:33:02,503 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41895 2024-12-13T07:33:02,527 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/9eb75de94ef3:0 server-side Connection retries=45 2024-12-13T07:33:02,527 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:02,527 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:02,527 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T07:33:02,527 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T07:33:02,527 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T07:33:02,527 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T07:33:02,528 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T07:33:02,531 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42189 2024-12-13T07:33:02,532 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-13T07:33:02,534 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-13T07:33:02,535 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T07:33:02,538 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T07:33:02,542 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42189 connecting to ZooKeeper ensemble=127.0.0.1:59810 2024-12-13T07:33:02,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:421890x0, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T07:33:02,550 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42189-0x1001e4e83250003 connected 2024-12-13T07:33:02,550 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T07:33:02,551 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T07:33:02,552 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T07:33:02,556 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42189 2024-12-13T07:33:02,556 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42189 2024-12-13T07:33:02,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42189 2024-12-13T07:33:02,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42189 2024-12-13T07:33:02,558 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42189 2024-12-13T07:33:02,560 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup 
master ZNode /hbase/backup-masters/9eb75de94ef3,33713,1734075181583 2024-12-13T07:33:02,574 DEBUG [M:0;9eb75de94ef3:33713 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9eb75de94ef3:33713 2024-12-13T07:33:02,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T07:33:02,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T07:33:02,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T07:33:02,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T07:33:02,577 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9eb75de94ef3,33713,1734075181583 2024-12-13T07:33:02,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T07:33:02,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T07:33:02,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T07:33:02,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:02,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:02,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T07:33:02,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:02,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-13T07:33:02,609 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-13T07:33:02,612 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-13T07:33:02,612 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9eb75de94ef3,33713,1734075181583 from backup master directory 2024-12-13T07:33:02,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T07:33:02,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T07:33:02,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T07:33:02,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9eb75de94ef3,33713,1734075181583 2024-12-13T07:33:02,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T07:33:02,626 WARN [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-13T07:33:02,627 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9eb75de94ef3,33713,1734075181583 2024-12-13T07:33:02,629 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-13T07:33:02,630 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-13T07:33:02,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741826_1002 (size=42) 2024-12-13T07:33:02,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741826_1002 (size=42) 2024-12-13T07:33:02,713 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/hbase.id with ID: ea5d5b9b-5313-4835-b0e8-96e026f51db6 2024-12-13T07:33:02,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741826_1002 (size=42) 2024-12-13T07:33:02,761 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T07:33:02,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:02,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:02,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:02,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:02,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741827_1003 (size=196) 2024-12-13T07:33:02,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741827_1003 (size=196) 2024-12-13T07:33:02,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741827_1003 (size=196) 2024-12-13T07:33:02,828 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:33:02,830 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-13T07:33:02,847 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T07:33:02,851 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T07:33:02,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741828_1004 (size=1189) 2024-12-13T07:33:02,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741828_1004 (size=1189) 2024-12-13T07:33:02,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741828_1004 (size=1189) 2024-12-13T07:33:02,916 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/data/master/store 2024-12-13T07:33:02,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741829_1005 (size=34) 2024-12-13T07:33:02,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741829_1005 (size=34) 2024-12-13T07:33:02,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741829_1005 (size=34) 2024-12-13T07:33:02,952 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. 
Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-13T07:33:02,952 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:02,954 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-13T07:33:02,954 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T07:33:02,955 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T07:33:02,955 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-13T07:33:02,955 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T07:33:02,955 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T07:33:02,955 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T07:33:02,958 WARN [master/9eb75de94ef3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/data/master/store/.initializing 2024-12-13T07:33:02,958 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/WALs/9eb75de94ef3,33713,1734075181583 2024-12-13T07:33:02,966 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-13T07:33:02,988 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9eb75de94ef3%2C33713%2C1734075181583, suffix=, logDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/WALs/9eb75de94ef3,33713,1734075181583, archiveDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/oldWALs, maxLogs=10 2024-12-13T07:33:03,012 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/WALs/9eb75de94ef3,33713,1734075181583/9eb75de94ef3%2C33713%2C1734075181583.1734075182994, exclude list is [], retry=0 2024-12-13T07:33:03,036 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33299,DS-3366fd89-6dd0-4619-a690-b626ebfdf962,DISK] 2024-12-13T07:33:03,036 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping 
handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34409,DS-76d4a548-41e1-4f97-8915-a3f81cb2319e,DISK] 2024-12-13T07:33:03,036 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40823,DS-addf9c7a-b733-4006-9870-a44eb3beb221,DISK] 2024-12-13T07:33:03,039 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-13T07:33:03,082 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/WALs/9eb75de94ef3,33713,1734075181583/9eb75de94ef3%2C33713%2C1734075181583.1734075182994 2024-12-13T07:33:03,084 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33473:33473),(127.0.0.1/127.0.0.1:35827:35827),(127.0.0.1/127.0.0.1:33377:33377)] 2024-12-13T07:33:03,084 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-13T07:33:03,085 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:03,090 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T07:33:03,092 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T07:33:03,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T07:33:03,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-13T07:33:03,179 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:03,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T07:33:03,183 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T07:33:03,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-13T07:33:03,192 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:03,194 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:03,194 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T07:33:03,198 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-13T07:33:03,198 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:03,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:03,200 
INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T07:33:03,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-13T07:33:03,206 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:03,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:03,214 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T07:33:03,215 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T07:33:03,226 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-13T07:33:03,230 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T07:33:03,235 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:03,236 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71765804, jitterRate=0.0693938136100769}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-13T07:33:03,240 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T07:33:03,241 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-13T07:33:03,276 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@503d0e21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:03,316 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-13T07:33:03,334 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-13T07:33:03,335 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-13T07:33:03,337 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-13T07:33:03,338 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-13T07:33:03,344 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-12-13T07:33:03,345 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-13T07:33:03,379 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-13T07:33:03,393 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-13T07:33:03,399 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-13T07:33:03,403 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-13T07:33:03,404 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-13T07:33:03,415 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-13T07:33:03,418 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-13T07:33:03,423 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-13T07:33:03,432 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-13T07:33:03,434 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-13T07:33:03,440 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-13T07:33:03,451 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-13T07:33:03,465 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-13T07:33:03,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T07:33:03,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T07:33:03,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T07:33:03,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-13T07:33:03,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T07:33:03,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:03,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:03,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:03,483 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=9eb75de94ef3,33713,1734075181583, sessionid=0x1001e4e83250000, setting cluster-up flag (Was=false) 2024-12-13T07:33:03,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:03,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:03,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:03,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:03,574 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-13T07:33:03,575 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9eb75de94ef3,33713,1734075181583 2024-12-13T07:33:03,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:03,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:03,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:03,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:03,624 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-13T07:33:03,625 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9eb75de94ef3,33713,1734075181583 2024-12-13T07:33:03,677 DEBUG [RS:2;9eb75de94ef3:42189 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;9eb75de94ef3:42189 2024-12-13T07:33:03,681 DEBUG [RS:0;9eb75de94ef3:36021 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9eb75de94ef3:36021 2024-12-13T07:33:03,687 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(1008): ClusterId : ea5d5b9b-5313-4835-b0e8-96e026f51db6 2024-12-13T07:33:03,700 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-13T07:33:03,702 DEBUG [RS:1;9eb75de94ef3:41895 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;9eb75de94ef3:41895 2024-12-13T07:33:03,703 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1008): ClusterId : ea5d5b9b-5313-4835-b0e8-96e026f51db6 2024-12-13T07:33:03,704 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-13T07:33:03,724 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1008): ClusterId : ea5d5b9b-5313-4835-b0e8-96e026f51db6 2024-12-13T07:33:03,724 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-13T07:33:03,727 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-13T07:33:03,727 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-13T07:33:03,740 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-13T07:33:03,741 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-13T07:33:03,757 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] master.HMaster(3390): Registered master coprocessor service: service=AccessControlService 2024-12-13T07:33:03,774 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-13T07:33:03,774 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-13T07:33:03,777 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:33:03,778 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-13T07:33:03,780 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-13T07:33:03,780 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-13T07:33:03,780 DEBUG [RS:0;9eb75de94ef3:36021 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11029f7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:03,781 DEBUG [RS:2;9eb75de94ef3:42189 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@da32042, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:03,812 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-13T07:33:03,812 DEBUG [RS:1;9eb75de94ef3:41895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63fa7307, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:03,816 DEBUG [RS:2;9eb75de94ef3:42189 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@460b27a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9eb75de94ef3/172.17.0.2:0 2024-12-13T07:33:03,821 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-13T07:33:03,825 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-13T07:33:03,826 DEBUG [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-13T07:33:03,826 INFO [RS:2;9eb75de94ef3:42189 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:33:03,827 DEBUG [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-13T07:33:03,830 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(3073): reportForDuty to master=9eb75de94ef3,33713,1734075181583 with isa=9eb75de94ef3/172.17.0.2:42189, startcode=1734075182526 2024-12-13T07:33:03,836 DEBUG [RS:0;9eb75de94ef3:36021 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eabca3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9eb75de94ef3/172.17.0.2:0 2024-12-13T07:33:03,836 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-13T07:33:03,836 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-13T07:33:03,837 DEBUG [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-13T07:33:03,842 INFO [RS:0;9eb75de94ef3:36021 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:33:03,842 DEBUG [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-13T07:33:03,845 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(3073): reportForDuty to master=9eb75de94ef3,33713,1734075181583 with isa=9eb75de94ef3/172.17.0.2:36021, startcode=1734075182342 2024-12-13T07:33:03,847 DEBUG [RS:1;9eb75de94ef3:41895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70e3b742, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9eb75de94ef3/172.17.0.2:0 2024-12-13T07:33:03,848 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-13T07:33:03,848 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-13T07:33:03,848 DEBUG [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-13T07:33:03,848 INFO [RS:1;9eb75de94ef3:41895 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:33:03,848 DEBUG [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-13T07:33:03,850 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(3073): reportForDuty to master=9eb75de94ef3,33713,1734075181583 with isa=9eb75de94ef3/172.17.0.2:41895, startcode=1734075182430 2024-12-13T07:33:03,861 DEBUG [RS:0;9eb75de94ef3:36021 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T07:33:03,864 DEBUG [RS:2;9eb75de94ef3:42189 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T07:33:03,866 DEBUG [RS:1;9eb75de94ef3:41895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T07:33:03,926 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-13T07:33:03,936 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-13T07:33:03,946 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-13T07:33:03,948 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57873, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T07:33:03,948 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36273, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T07:33:03,948 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54237, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T07:33:03,957 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T07:33:03,970 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T07:33:03,971 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T07:33:03,974 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9eb75de94ef3,33713,1734075181583 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-13T07:33:03,980 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9eb75de94ef3:0, corePoolSize=5, maxPoolSize=5 2024-12-13T07:33:03,980 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9eb75de94ef3:0, corePoolSize=5, maxPoolSize=5 2024-12-13T07:33:03,980 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9eb75de94ef3:0, corePoolSize=5, maxPoolSize=5 2024-12-13T07:33:03,980 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9eb75de94ef3:0, corePoolSize=5, maxPoolSize=5 2024-12-13T07:33:03,980 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9eb75de94ef3:0, corePoolSize=10, maxPoolSize=10 2024-12-13T07:33:03,980 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:03,981 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9eb75de94ef3:0, corePoolSize=2, maxPoolSize=2 2024-12-13T07:33:03,981 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,010 DEBUG [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-13T07:33:04,010 WARN [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-13T07:33:04,011 DEBUG [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-13T07:33:04,011 WARN [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-13T07:33:04,011 DEBUG [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-13T07:33:04,011 WARN [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-13T07:33:04,021 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-13T07:33:04,022 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-13T07:33:04,022 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734075214022 2024-12-13T07:33:04,025 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-13T07:33:04,026 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-13T07:33:04,032 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-13T07:33:04,033 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-13T07:33:04,033 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-13T07:33:04,033 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-13T07:33:04,037 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,039 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-13T07:33:04,041 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-13T07:33:04,041 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-13T07:33:04,052 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:04,052 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T07:33:04,056 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-13T07:33:04,057 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-13T07:33:04,068 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9eb75de94ef3:0:becomeActiveMaster-HFileCleaner.large.0-1734075184059,5,FailOnTimeoutGroup] 2024-12-13T07:33:04,073 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9eb75de94ef3:0:becomeActiveMaster-HFileCleaner.small.0-1734075184068,5,FailOnTimeoutGroup] 2024-12-13T07:33:04,073 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,074 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-13T07:33:04,075 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,076 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
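Editor's note: the FSTableDescriptors lines above print the hbase:meta descriptor, whose per-family attributes (VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING, BLOCKSIZE) map directly onto the HBase 2.x builder API. A minimal sketch of declaring an equivalent family on an ordinary user table follows; the table name is hypothetical and not part of the log.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes printed by FSTableDescriptors above.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table")) // hypothetical name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBlocksize(8192)                                    // BLOCKSIZE => 8 KB
            .build())
        .build();
  }
}
```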
2024-12-13T07:33:04,112 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(3073): reportForDuty to master=9eb75de94ef3,33713,1734075181583 with isa=9eb75de94ef3/172.17.0.2:41895, startcode=1734075182430 2024-12-13T07:33:04,112 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(3073): reportForDuty to master=9eb75de94ef3,33713,1734075181583 with isa=9eb75de94ef3/172.17.0.2:42189, startcode=1734075182526 2024-12-13T07:33:04,113 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(3073): reportForDuty to master=9eb75de94ef3,33713,1734075181583 with isa=9eb75de94ef3/172.17.0.2:36021, startcode=1734075182342 2024-12-13T07:33:04,114 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:04,117 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713 {}] master.ServerManager(486): Registering regionserver=9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:04,132 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:04,133 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713 {}] master.ServerManager(486): Registering regionserver=9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:04,134 DEBUG [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:04,134 DEBUG [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41477 2024-12-13T07:33:04,135 DEBUG [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-13T07:33:04,137 DEBUG [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:04,137 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:04,137 DEBUG [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41477 2024-12-13T07:33:04,137 DEBUG [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-13T07:33:04,137 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713 {}] master.ServerManager(486): Registering regionserver=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:04,142 DEBUG [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:04,142 DEBUG [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41477 2024-12-13T07:33:04,142 DEBUG [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-13T07:33:04,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 
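Editor's note: the reportForDuty/registration lines above show the cluster coordinates a client would need (ZooKeeper quorum 127.0.0.1:59810 and the hbase.rootdir the master hands to each regionserver). A minimal client-connection sketch using those values; they come from this test run and any real deployment would differ.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static Connection connect() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Values taken from the registration log above.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "59810");
    conf.set("hbase.rootdir",
        "hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534");
    return ConnectionFactory.createConnection(conf);
  }
}
```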
2024-12-13T07:33:04,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741831_1007 (size=1039) 2024-12-13T07:33:04,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741831_1007 (size=1039) 2024-12-13T07:33:04,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741831_1007 (size=1039) 2024-12-13T07:33:04,155 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-13T07:33:04,156 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:04,176 DEBUG [RS:2;9eb75de94ef3:42189 {}] zookeeper.ZKUtil(111): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:04,176 WARN [RS:2;9eb75de94ef3:42189 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-13T07:33:04,177 INFO [RS:2;9eb75de94ef3:42189 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T07:33:04,177 DEBUG [RS:0;9eb75de94ef3:36021 {}] zookeeper.ZKUtil(111): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:04,177 DEBUG [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:04,177 WARN [RS:0;9eb75de94ef3:36021 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
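Editor's note: each regionserver above instantiates org.apache.hadoop.hbase.wal.AsyncFSWALProvider. A sketch of the configuration knob that normally selects the WAL provider; the key name is the commonly used one and should be confirmed against the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // "asyncfs" selects AsyncFSWALProvider, as instantiated by the regionservers above;
    // "filesystem" would select the classic FSHLog-based provider instead.
    conf.set("hbase.wal.provider", "asyncfs");
    return conf;
  }
}
```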
2024-12-13T07:33:04,177 INFO [RS:0;9eb75de94ef3:36021 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T07:33:04,177 DEBUG [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:04,178 DEBUG [RS:1;9eb75de94ef3:41895 {}] zookeeper.ZKUtil(111): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:04,178 WARN [RS:1;9eb75de94ef3:41895 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-13T07:33:04,179 INFO [RS:1;9eb75de94ef3:41895 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T07:33:04,179 DEBUG [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:04,180 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9eb75de94ef3,42189,1734075182526] 2024-12-13T07:33:04,180 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9eb75de94ef3,41895,1734075182430] 2024-12-13T07:33:04,180 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9eb75de94ef3,36021,1734075182342] 2024-12-13T07:33:04,210 DEBUG [RS:0;9eb75de94ef3:36021 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-13T07:33:04,220 DEBUG [RS:1;9eb75de94ef3:41895 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-13T07:33:04,220 DEBUG [RS:2;9eb75de94ef3:42189 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-13T07:33:04,225 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-13T07:33:04,225 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-13T07:33:04,226 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-13T07:33:04,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741832_1008 (size=32) 2024-12-13T07:33:04,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741832_1008 (size=32) 2024-12-13T07:33:04,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741832_1008 (size=32) 2024-12-13T07:33:04,289 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:04,292 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-13T07:33:04,296 
INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-13T07:33:04,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-13T07:33:04,302 INFO [RS:2;9eb75de94ef3:42189 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-13T07:33:04,303 INFO [RS:2;9eb75de94ef3:42189 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,304 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-13T07:33:04,304 INFO [RS:1;9eb75de94ef3:41895 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-13T07:33:04,304 INFO [RS:1;9eb75de94ef3:41895 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,304 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-13T07:33:04,305 INFO [RS:0;9eb75de94ef3:36021 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-13T07:33:04,305 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-13T07:33:04,305 INFO [RS:0;9eb75de94ef3:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
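Editor's note: the MemStoreFlusher and PressureAwareCompactionThroughputController lines above reflect heap-fraction and throughput settings. A sketch of the configuration keys usually behind those figures; treat the exact key names as assumptions to verify against the running HBase version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushAndCompactionTuningSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of heap shared by all memstores; the ~880 MB globalMemStoreLimit above
    // is this fraction of the test JVM's heap.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Compaction throughput bounds (bytes/second), matching the 100 MB / 50 MB per second
    // figures logged by PressureAwareCompactionThroughputController above.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}
```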
2024-12-13T07:33:04,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-13T07:33:04,306 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:04,307 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-13T07:33:04,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T07:33:04,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-13T07:33:04,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-13T07:33:04,312 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:04,313 INFO [RS:0;9eb75de94ef3:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,313 INFO [RS:1;9eb75de94ef3:41895 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-13T07:33:04,313 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,313 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,313 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,313 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,313 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,313 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,314 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,314 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,314 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,314 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,314 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9eb75de94ef3:0, corePoolSize=2, maxPoolSize=2 2024-12-13T07:33:04,314 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9eb75de94ef3:0, corePoolSize=2, maxPoolSize=2 2024-12-13T07:33:04,314 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T07:33:04,314 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,315 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 
2024-12-13T07:33:04,315 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,315 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,315 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,315 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,315 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0, corePoolSize=3, maxPoolSize=3 2024-12-13T07:33:04,315 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,315 DEBUG [RS:0;9eb75de94ef3:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9eb75de94ef3:0, corePoolSize=3, maxPoolSize=3 2024-12-13T07:33:04,315 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,316 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,316 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0, corePoolSize=3, maxPoolSize=3 2024-12-13T07:33:04,316 DEBUG [RS:1;9eb75de94ef3:41895 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9eb75de94ef3:0, corePoolSize=3, maxPoolSize=3 2024-12-13T07:33:04,317 INFO [RS:2;9eb75de94ef3:42189 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-13T07:33:04,317 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,317 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,318 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,318 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,318 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,318 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9eb75de94ef3:0, corePoolSize=2, maxPoolSize=2 2024-12-13T07:33:04,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-13T07:33:04,318 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,318 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:04,318 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,319 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,319 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,319 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9eb75de94ef3:0, corePoolSize=1, maxPoolSize=1 2024-12-13T07:33:04,319 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0, corePoolSize=3, maxPoolSize=3 2024-12-13T07:33:04,319 DEBUG [RS:2;9eb75de94ef3:42189 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9eb75de94ef3:0, corePoolSize=3, 
maxPoolSize=3 2024-12-13T07:33:04,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T07:33:04,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740 2024-12-13T07:33:04,323 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740 2024-12-13T07:33:04,328 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-13T07:33:04,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-13T07:33:04,331 INFO [RS:0;9eb75de94ef3:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,332 INFO [RS:0;9eb75de94ef3:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,332 INFO [RS:0;9eb75de94ef3:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,332 INFO [RS:0;9eb75de94ef3:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,332 INFO [RS:0;9eb75de94ef3:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,36021,1734075182342-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-13T07:33:04,333 INFO [RS:1;9eb75de94ef3:41895 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,334 INFO [RS:1;9eb75de94ef3:41895 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,334 INFO [RS:1;9eb75de94ef3:41895 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,334 INFO [RS:1;9eb75de94ef3:41895 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,334 INFO [RS:1;9eb75de94ef3:41895 {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,41895,1734075182430-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-13T07:33:04,335 INFO [RS:2;9eb75de94ef3:42189 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,335 INFO [RS:2;9eb75de94ef3:42189 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,335 INFO [RS:2;9eb75de94ef3:42189 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,335 INFO [RS:2;9eb75de94ef3:42189 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-12-13T07:33:04,335 INFO [RS:2;9eb75de94ef3:42189 {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,42189,1734075182526-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-13T07:33:04,340 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:04,342 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63312625, jitterRate=-0.05656836926937103}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-13T07:33:04,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-13T07:33:04,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T07:33:04,344 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T07:33:04,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T07:33:04,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T07:33:04,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T07:33:04,348 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-13T07:33:04,348 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T07:33:04,351 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-13T07:33:04,351 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-13T07:33:04,355 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-13T07:33:04,357 INFO [RS:0;9eb75de94ef3:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,36021,1734075182342-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,358 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-13T07:33:04,359 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-13T07:33:04,360 INFO [RS:1;9eb75de94ef3:41895 {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,41895,1734075182430-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:04,361 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-13T07:33:04,361 INFO [RS:2;9eb75de94ef3:42189 {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,42189,1734075182526-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
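Editor's note: the "Opened 1588230740" line above reports a SteppingSplitPolicy wrapping the IncreasingToUpperBound and ConstantSize policies. A sketch of setting the cluster-wide default split policy; the key name is the standard one, hedged as an assumption for other versions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SplitPolicySketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Cluster-wide default region split policy; the meta region opened above reports
    // SteppingSplitPolicy as its effective policy.
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
    return conf;
  }
}
```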
2024-12-13T07:33:04,371 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-13T07:33:04,379 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.Replication(204): 9eb75de94ef3,36021,1734075182342 started 2024-12-13T07:33:04,380 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(1767): Serving as 9eb75de94ef3,36021,1734075182342, RpcServer on 9eb75de94ef3/172.17.0.2:36021, sessionid=0x1001e4e83250001 2024-12-13T07:33:04,381 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-13T07:33:04,381 DEBUG [RS:0;9eb75de94ef3:36021 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:04,381 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9eb75de94ef3,36021,1734075182342' 2024-12-13T07:33:04,381 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-13T07:33:04,382 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-13T07:33:04,383 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-13T07:33:04,383 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-13T07:33:04,383 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-13T07:33:04,383 DEBUG [RS:0;9eb75de94ef3:36021 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:04,383 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9eb75de94ef3,36021,1734075182342' 2024-12-13T07:33:04,383 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-13T07:33:04,384 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-13T07:33:04,385 DEBUG [RS:0;9eb75de94ef3:36021 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-13T07:33:04,385 INFO [RS:0;9eb75de94ef3:36021 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-13T07:33:04,385 INFO [RS:0;9eb75de94ef3:36021 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-13T07:33:04,388 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.Replication(204): 9eb75de94ef3,42189,1734075182526 started 2024-12-13T07:33:04,388 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1767): Serving as 9eb75de94ef3,42189,1734075182526, RpcServer on 9eb75de94ef3/172.17.0.2:42189, sessionid=0x1001e4e83250003 2024-12-13T07:33:04,388 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-13T07:33:04,389 DEBUG [RS:2;9eb75de94ef3:42189 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:04,389 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9eb75de94ef3,42189,1734075182526' 2024-12-13T07:33:04,389 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-13T07:33:04,390 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.Replication(204): 9eb75de94ef3,41895,1734075182430 started 2024-12-13T07:33:04,390 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-13T07:33:04,390 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1767): Serving as 9eb75de94ef3,41895,1734075182430, RpcServer on 9eb75de94ef3/172.17.0.2:41895, sessionid=0x1001e4e83250002 2024-12-13T07:33:04,390 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-13T07:33:04,390 DEBUG [RS:1;9eb75de94ef3:41895 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:04,390 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9eb75de94ef3,41895,1734075182430' 2024-12-13T07:33:04,390 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-13T07:33:04,390 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-13T07:33:04,390 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-13T07:33:04,390 DEBUG [RS:2;9eb75de94ef3:42189 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:04,391 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9eb75de94ef3,42189,1734075182526' 2024-12-13T07:33:04,391 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-13T07:33:04,391 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-13T07:33:04,396 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-13T07:33:04,396 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-13T07:33:04,396 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot 
starting 2024-12-13T07:33:04,396 DEBUG [RS:1;9eb75de94ef3:41895 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:04,396 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9eb75de94ef3,41895,1734075182430' 2024-12-13T07:33:04,396 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-13T07:33:04,397 DEBUG [RS:2;9eb75de94ef3:42189 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-13T07:33:04,397 INFO [RS:2;9eb75de94ef3:42189 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-13T07:33:04,397 INFO [RS:2;9eb75de94ef3:42189 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-13T07:33:04,397 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-13T07:33:04,404 DEBUG [RS:1;9eb75de94ef3:41895 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-13T07:33:04,404 INFO [RS:1;9eb75de94ef3:41895 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-13T07:33:04,405 INFO [RS:1;9eb75de94ef3:41895 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-13T07:33:04,490 INFO [RS:0;9eb75de94ef3:36021 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-13T07:33:04,492 INFO [RS:0;9eb75de94ef3:36021 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9eb75de94ef3%2C36021%2C1734075182342, suffix=, logDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,36021,1734075182342, archiveDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/oldWALs, maxLogs=32 2024-12-13T07:33:04,498 INFO [RS:2;9eb75de94ef3:42189 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-13T07:33:04,501 INFO [RS:2;9eb75de94ef3:42189 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9eb75de94ef3%2C42189%2C1734075182526, suffix=, logDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,42189,1734075182526, archiveDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/oldWALs, maxLogs=32 2024-12-13T07:33:04,505 INFO [RS:1;9eb75de94ef3:41895 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-13T07:33:04,508 DEBUG [RS:0;9eb75de94ef3:36021 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,36021,1734075182342/9eb75de94ef3%2C36021%2C1734075182342.1734075184494, exclude list is [], retry=0 2024-12-13T07:33:04,508 INFO [RS:1;9eb75de94ef3:41895 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9eb75de94ef3%2C41895%2C1734075182430, suffix=, logDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,41895,1734075182430, archiveDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/oldWALs, maxLogs=32 2024-12-13T07:33:04,513 DEBUG 
[RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34409,DS-76d4a548-41e1-4f97-8915-a3f81cb2319e,DISK] 2024-12-13T07:33:04,513 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33299,DS-3366fd89-6dd0-4619-a690-b626ebfdf962,DISK] 2024-12-13T07:33:04,513 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40823,DS-addf9c7a-b733-4006-9870-a44eb3beb221,DISK] 2024-12-13T07:33:04,522 DEBUG [RS:2;9eb75de94ef3:42189 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,42189,1734075182526/9eb75de94ef3%2C42189%2C1734075182526.1734075184503, exclude list is [], retry=0 2024-12-13T07:33:04,523 DEBUG [RS:1;9eb75de94ef3:41895 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,41895,1734075182430/9eb75de94ef3%2C41895%2C1734075182430.1734075184510, exclude list is [], retry=0 2024-12-13T07:33:04,525 INFO [RS:0;9eb75de94ef3:36021 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,36021,1734075182342/9eb75de94ef3%2C36021%2C1734075182342.1734075184494 2024-12-13T07:33:04,526 DEBUG [RS:0;9eb75de94ef3:36021 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33473:33473),(127.0.0.1/127.0.0.1:35827:35827),(127.0.0.1/127.0.0.1:33377:33377)] 2024-12-13T07:33:04,527 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33299,DS-3366fd89-6dd0-4619-a690-b626ebfdf962,DISK] 2024-12-13T07:33:04,527 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40823,DS-addf9c7a-b733-4006-9870-a44eb3beb221,DISK] 2024-12-13T07:33:04,534 WARN [9eb75de94ef3:33713 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
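Editor's note: the AbstractFSWAL(500) lines above report "blocksize=256 MB, rollsize=128 MB, ... maxLogs=32". A sketch of the settings those figures usually derive from (roll size is block size times the roll multiplier); the key names are the commonly documented ones and should be checked against the deployed version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size (256 MB above)
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at blocksize * multiplier (128 MB)
    conf.setInt("hbase.regionserver.maxlogs", 32);                          // WAL count that triggers forced flushes
    return conf;
  }
}
```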
2024-12-13T07:33:04,572 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40823,DS-addf9c7a-b733-4006-9870-a44eb3beb221,DISK] 2024-12-13T07:33:04,572 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33299,DS-3366fd89-6dd0-4619-a690-b626ebfdf962,DISK] 2024-12-13T07:33:04,572 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34409,DS-76d4a548-41e1-4f97-8915-a3f81cb2319e,DISK] 2024-12-13T07:33:04,572 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34409,DS-76d4a548-41e1-4f97-8915-a3f81cb2319e,DISK] 2024-12-13T07:33:04,576 INFO [RS:1;9eb75de94ef3:41895 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,41895,1734075182430/9eb75de94ef3%2C41895%2C1734075182430.1734075184510 2024-12-13T07:33:04,577 INFO [RS:2;9eb75de94ef3:42189 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,42189,1734075182526/9eb75de94ef3%2C42189%2C1734075182526.1734075184503 2024-12-13T07:33:04,579 DEBUG [RS:1;9eb75de94ef3:41895 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33473:33473),(127.0.0.1/127.0.0.1:33377:33377),(127.0.0.1/127.0.0.1:35827:35827)] 2024-12-13T07:33:04,579 DEBUG [RS:2;9eb75de94ef3:42189 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33377:33377),(127.0.0.1/127.0.0.1:33473:33473),(127.0.0.1/127.0.0.1:35827:35827)] 2024-12-13T07:33:04,786 DEBUG [9eb75de94ef3:33713 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-13T07:33:04,789 DEBUG [9eb75de94ef3:33713 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:33:04,803 DEBUG [9eb75de94ef3:33713 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:33:04,803 DEBUG [9eb75de94ef3:33713 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:33:04,804 DEBUG [9eb75de94ef3:33713 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:33:04,804 INFO [9eb75de94ef3:33713 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:33:04,804 INFO [9eb75de94ef3:33713 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:33:04,804 INFO [9eb75de94ef3:33713 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:33:04,804 DEBUG [9eb75de94ef3:33713 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:33:04,810 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:04,819 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 
location in ZooKeeper as 9eb75de94ef3,42189,1734075182526, state=OPENING 2024-12-13T07:33:04,832 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-13T07:33:04,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:04,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:04,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:04,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:04,841 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T07:33:04,842 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T07:33:04,843 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T07:33:04,844 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T07:33:04,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:33:05,027 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:05,030 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T07:33:05,034 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58078, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T07:33:05,048 INFO [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-13T07:33:05,048 INFO [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T07:33:05,049 INFO [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-13T07:33:05,053 INFO [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9eb75de94ef3%2C42189%2C1734075182526.meta, suffix=.meta, logDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,42189,1734075182526, 
archiveDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/oldWALs, maxLogs=32 2024-12-13T07:33:05,071 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,42189,1734075182526/9eb75de94ef3%2C42189%2C1734075182526.meta.1734075185055.meta, exclude list is [], retry=0 2024-12-13T07:33:05,075 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34409,DS-76d4a548-41e1-4f97-8915-a3f81cb2319e,DISK] 2024-12-13T07:33:05,075 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40823,DS-addf9c7a-b733-4006-9870-a44eb3beb221,DISK] 2024-12-13T07:33:05,075 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33299,DS-3366fd89-6dd0-4619-a690-b626ebfdf962,DISK] 2024-12-13T07:33:05,079 INFO [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/WALs/9eb75de94ef3,42189,1734075182526/9eb75de94ef3%2C42189%2C1734075182526.meta.1734075185055.meta 2024-12-13T07:33:05,079 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33473:33473),(127.0.0.1/127.0.0.1:33377:33377),(127.0.0.1/127.0.0.1:35827:35827)] 2024-12-13T07:33:05,080 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-13T07:33:05,081 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-13T07:33:05,082 INFO [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:33:05,083 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-13T07:33:05,085 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-13T07:33:05,086 INFO [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
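Editor's note: the coprocessor lines above show MultiRowMutationEndpoint being loaded from the hbase:meta descriptor (the coprocessor$1 attribute). A minimal sketch of attaching the same endpoint to a user table through the descriptor builder; the table name is hypothetical.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
  public static TableDescriptor build() throws Exception {
    // hbase:meta carries MultiRowMutationEndpoint in its descriptor, as logged above;
    // a user table can attach an endpoint coprocessor the same way.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table")) // hypothetical
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}
```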
2024-12-13T07:33:05,097 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-13T07:33:05,097 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:05,097 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-13T07:33:05,097 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-13T07:33:05,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-13T07:33:05,102 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-13T07:33:05,102 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:05,103 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T07:33:05,103 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-13T07:33:05,105 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-13T07:33:05,105 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:05,106 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T07:33:05,106 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-13T07:33:05,108 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-13T07:33:05,108 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:05,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T07:33:05,112 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740 2024-12-13T07:33:05,115 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740 2024-12-13T07:33:05,118 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
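The FlushLargeStoresPolicy fallback above can be reproduced from the log itself: hbase:meta has the three column families opened just before (info, rep_barrier, table), and with a memstore flush size of 128 MB (134217728 bytes, the stock default) the per-family lower bound is 134217728 / 3 = 44739242 bytes, roughly 42.7 MB. That matches both the "(42.7 M)" in the message and the flushSizeLowerBound=44739242 reported a few entries later when the region finishes opening.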
2024-12-13T07:33:05,120 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-13T07:33:05,122 INFO [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73487051, jitterRate=0.0950423926115036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-13T07:33:05,125 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-13T07:33:05,132 INFO [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734075185019 2024-12-13T07:33:05,146 DEBUG [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-13T07:33:05,147 INFO [RS_OPEN_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-13T07:33:05,148 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:05,152 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9eb75de94ef3,42189,1734075182526, state=OPEN 2024-12-13T07:33:05,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T07:33:05,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T07:33:05,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T07:33:05,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T07:33:05,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T07:33:05,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T07:33:05,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T07:33:05,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T07:33:05,171 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished 
subprocedure pid=3, resume processing ppid=2 2024-12-13T07:33:05,171 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=9eb75de94ef3,42189,1734075182526 in 320 msec 2024-12-13T07:33:05,184 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-13T07:33:05,184 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 814 msec 2024-12-13T07:33:05,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4060 sec 2024-12-13T07:33:05,203 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734075185203, completionTime=-1 2024-12-13T07:33:05,203 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-13T07:33:05,203 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-13T07:33:05,251 DEBUG [hconnection-0x6218feab-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:05,255 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58088, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:05,286 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-12-13T07:33:05,286 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734075245286 2024-12-13T07:33:05,286 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734075305286 2024-12-13T07:33:05,286 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 82 msec 2024-12-13T07:33:05,334 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-13T07:33:05,343 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,33713,1734075181583-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:05,343 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,33713,1734075181583-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:05,343 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,33713,1734075181583-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-13T07:33:05,345 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9eb75de94ef3:33713, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:05,345 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:05,353 DEBUG [master/9eb75de94ef3:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-13T07:33:05,363 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 2024-12-13T07:33:05,365 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T07:33:05,373 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-13T07:33:05,376 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:33:05,378 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:05,380 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:33:05,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741837_1013 (size=358) 2024-12-13T07:33:05,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741837_1013 (size=358) 2024-12-13T07:33:05,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741837_1013 (size=358) 2024-12-13T07:33:05,398 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4ee0c6377199ce786a0b82a4e00c172f, NAME => 'hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:05,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741838_1014 (size=42) 2024-12-13T07:33:05,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34409 is added to blk_1073741838_1014 (size=42) 2024-12-13T07:33:05,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741838_1014 (size=42) 2024-12-13T07:33:05,416 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:05,416 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 4ee0c6377199ce786a0b82a4e00c172f, disabling compactions & flushes 2024-12-13T07:33:05,416 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 2024-12-13T07:33:05,416 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 2024-12-13T07:33:05,416 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. after waiting 0 ms 2024-12-13T07:33:05,416 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 2024-12-13T07:33:05,417 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 2024-12-13T07:33:05,417 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4ee0c6377199ce786a0b82a4e00c172f: 2024-12-13T07:33:05,419 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:33:05,425 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734075185420"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075185420"}]},"ts":"1734075185420"} 2024-12-13T07:33:05,446 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-13T07:33:05,448 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:33:05,452 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075185449"}]},"ts":"1734075185449"} 2024-12-13T07:33:05,457 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-13T07:33:05,474 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:33:05,475 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:33:05,475 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:33:05,475 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:33:05,475 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:33:05,475 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:33:05,475 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:33:05,475 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:33:05,477 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4ee0c6377199ce786a0b82a4e00c172f, ASSIGN}] 2024-12-13T07:33:05,479 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4ee0c6377199ce786a0b82a4e00c172f, ASSIGN 2024-12-13T07:33:05,481 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=4ee0c6377199ce786a0b82a4e00c172f, ASSIGN; state=OFFLINE, location=9eb75de94ef3,41895,1734075182430; forceNewPlan=false, retain=false 2024-12-13T07:33:05,632 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-13T07:33:05,633 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4ee0c6377199ce786a0b82a4e00c172f, regionState=OPENING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:05,636 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 4ee0c6377199ce786a0b82a4e00c172f, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:33:05,790 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:05,790 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T07:33:05,793 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33906, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T07:33:05,798 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 2024-12-13T07:33:05,798 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 4ee0c6377199ce786a0b82a4e00c172f, NAME => 'hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f.', STARTKEY => '', ENDKEY => ''} 2024-12-13T07:33:05,798 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. service=AccessControlService 2024-12-13T07:33:05,799 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T07:33:05,799 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 4ee0c6377199ce786a0b82a4e00c172f 2024-12-13T07:33:05,799 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:05,799 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 4ee0c6377199ce786a0b82a4e00c172f 2024-12-13T07:33:05,799 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 4ee0c6377199ce786a0b82a4e00c172f 2024-12-13T07:33:05,802 INFO [StoreOpener-4ee0c6377199ce786a0b82a4e00c172f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4ee0c6377199ce786a0b82a4e00c172f 2024-12-13T07:33:05,804 INFO [StoreOpener-4ee0c6377199ce786a0b82a4e00c172f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ee0c6377199ce786a0b82a4e00c172f columnFamilyName info 2024-12-13T07:33:05,804 DEBUG [StoreOpener-4ee0c6377199ce786a0b82a4e00c172f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:05,806 INFO [StoreOpener-4ee0c6377199ce786a0b82a4e00c172f-1 {}] regionserver.HStore(327): Store=4ee0c6377199ce786a0b82a4e00c172f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:05,808 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/namespace/4ee0c6377199ce786a0b82a4e00c172f 2024-12-13T07:33:05,809 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/namespace/4ee0c6377199ce786a0b82a4e00c172f 2024-12-13T07:33:05,818 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, 
pid=6}] regionserver.HRegion(1085): writing seq id for 4ee0c6377199ce786a0b82a4e00c172f 2024-12-13T07:33:05,824 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/namespace/4ee0c6377199ce786a0b82a4e00c172f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:05,826 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 4ee0c6377199ce786a0b82a4e00c172f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60448324, jitterRate=-0.09924978017807007}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:33:05,827 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 4ee0c6377199ce786a0b82a4e00c172f: 2024-12-13T07:33:05,830 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f., pid=6, masterSystemTime=1734075185790 2024-12-13T07:33:05,836 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 2024-12-13T07:33:05,836 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 
2024-12-13T07:33:05,837 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4ee0c6377199ce786a0b82a4e00c172f, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:05,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-13T07:33:05,858 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 4ee0c6377199ce786a0b82a4e00c172f, server=9eb75de94ef3,41895,1734075182430 in 209 msec 2024-12-13T07:33:05,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-13T07:33:05,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=4ee0c6377199ce786a0b82a4e00c172f, ASSIGN in 379 msec 2024-12-13T07:33:05,865 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:33:05,866 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075185866"}]},"ts":"1734075185866"} 2024-12-13T07:33:05,870 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-13T07:33:05,884 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-13T07:33:05,884 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:33:05,888 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 519 msec 2024-12-13T07:33:05,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-13T07:33:05,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:05,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:05,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:05,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:05,922 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:05,926 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33908, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:05,938 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-13T07:33:05,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-13T07:33:05,979 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 45 msec 2024-12-13T07:33:05,984 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-13T07:33:06,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-13T07:33:06,027 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 43 msec 2024-12-13T07:33:06,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-13T07:33:06,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-13T07:33:06,074 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.447sec 2024-12-13T07:33:06,076 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-13T07:33:06,078 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-13T07:33:06,079 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-13T07:33:06,080 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-13T07:33:06,080 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-13T07:33:06,082 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,33713,1734075181583-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-13T07:33:06,082 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,33713,1734075181583-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-13T07:33:06,102 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.HMaster$4(2389): Client=null/null create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T07:33:06,104 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:acl 2024-12-13T07:33:06,106 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:33:06,106 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:06,109 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:33:06,109 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] master.MasterRpcServices(713): Client=null/null procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 9 2024-12-13T07:33:06,135 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T07:33:06,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741839_1015 (size=349) 2024-12-13T07:33:06,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741839_1015 (size=349) 2024-12-13T07:33:06,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741839_1015 (size=349) 2024-12-13T07:33:06,180 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4825f6265c91ad571de6370b4fecbf02, NAME => 'hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:06,187 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b906bf1 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4a365fc5 2024-12-13T07:33:06,188 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): 
ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-13T07:33:06,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dd3141d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:06,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741840_1016 (size=36) 2024-12-13T07:33:06,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741840_1016 (size=36) 2024-12-13T07:33:06,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741840_1016 (size=36) 2024-12-13T07:33:06,207 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-13T07:33:06,207 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-13T07:33:06,213 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:06,213 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1681): Closing 4825f6265c91ad571de6370b4fecbf02, disabling compactions & flushes 2024-12-13T07:33:06,214 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 2024-12-13T07:33:06,214 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 2024-12-13T07:33:06,214 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. after waiting 0 ms 2024-12-13T07:33:06,214 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 2024-12-13T07:33:06,214 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1922): Closed hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 2024-12-13T07:33:06,214 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4825f6265c91ad571de6370b4fecbf02: 2024-12-13T07:33:06,216 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:33:06,217 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1734075186216"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075186216"}]},"ts":"1734075186216"} 2024-12-13T07:33:06,221 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
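The WARN from client.ZKConnectionRegistry above points at the RPC-based connection registry as the replacement. A minimal client-side sketch of that switch, assuming an HBase 2.5+ client where org.apache.hadoop.hbase.client.RpcConnectionRegistry and the hbase.client.bootstrap.servers key are available; the endpoint list is a placeholder, not taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class RpcRegistryClient {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Select the RPC-based registry instead of the deprecated ZooKeeper one.
        conf.set("hbase.client.registry.impl",
            "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
        // Placeholder bootstrap endpoints (master/regionserver RPC ports), not from this log.
        conf.set("hbase.client.bootstrap.servers", "host1:16000,host2:16000");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // Reaching meta confirms the registry located the cluster without ZooKeeper.
          System.out.println("Connected via RPC registry, meta = " + meta.getName());
        }
      }
    }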
2024-12-13T07:33:06,223 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:33:06,223 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075186223"}]},"ts":"1734075186223"} 2024-12-13T07:33:06,223 DEBUG [hconnection-0x26bbbc3b-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:06,227 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-13T07:33:06,238 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T07:33:06,242 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:33:06,243 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58096, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:06,245 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:33:06,245 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:33:06,245 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:33:06,245 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:33:06,245 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:33:06,245 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:33:06,245 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:33:06,246 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=4825f6265c91ad571de6370b4fecbf02, ASSIGN}] 2024-12-13T07:33:06,248 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=4825f6265c91ad571de6370b4fecbf02, ASSIGN 2024-12-13T07:33:06,250 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=9eb75de94ef3,33713,1734075181583 2024-12-13T07:33:06,251 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2790): Starting mini mapreduce cluster... 
2024-12-13T07:33:06,251 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/test.cache.data in system properties and HBase conf 2024-12-13T07:33:06,251 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.tmp.dir in system properties and HBase conf 2024-12-13T07:33:06,251 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir in system properties and HBase conf 2024-12-13T07:33:06,251 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-13T07:33:06,252 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-13T07:33:06,252 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-13T07:33:06,252 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-13T07:33:06,252 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-13T07:33:06,253 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-13T07:33:06,253 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T07:33:06,253 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:acl, region=4825f6265c91ad571de6370b4fecbf02, ASSIGN; state=OFFLINE, location=9eb75de94ef3,41895,1734075182430; forceNewPlan=false, retain=false 2024-12-13T07:33:06,253 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-13T07:33:06,253 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-13T07:33:06,253 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T07:33:06,253 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T07:33:06,253 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-13T07:33:06,254 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/nfs.dump.dir in system properties and HBase conf 2024-12-13T07:33:06,254 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/java.io.tmpdir in system properties and HBase conf 2024-12-13T07:33:06,254 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T07:33:06,254 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-13T07:33:06,254 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-13T07:33:06,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741841_1017 (size=592039) 2024-12-13T07:33:06,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741841_1017 (size=592039) 
2024-12-13T07:33:06,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741841_1017 (size=592039) 2024-12-13T07:33:06,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741842_1018 (size=1663647) 2024-12-13T07:33:06,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741842_1018 (size=1663647) 2024-12-13T07:33:06,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741842_1018 (size=1663647) 2024-12-13T07:33:06,403 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-13T07:33:06,404 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=4825f6265c91ad571de6370b4fecbf02, regionState=OPENING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:06,408 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 4825f6265c91ad571de6370b4fecbf02, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:33:06,438 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T07:33:06,583 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:06,622 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(135): Open hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 2024-12-13T07:33:06,622 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 4825f6265c91ad571de6370b4fecbf02, NAME => 'hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02.', STARTKEY => '', ENDKEY => ''} 2024-12-13T07:33:06,623 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. service=AccessControlService 2024-12-13T07:33:06,623 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T07:33:06,624 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 4825f6265c91ad571de6370b4fecbf02 2024-12-13T07:33:06,624 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(894): Instantiated hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:06,624 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 4825f6265c91ad571de6370b4fecbf02 2024-12-13T07:33:06,624 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 4825f6265c91ad571de6370b4fecbf02 2024-12-13T07:33:06,627 INFO [StoreOpener-4825f6265c91ad571de6370b4fecbf02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 4825f6265c91ad571de6370b4fecbf02 2024-12-13T07:33:06,631 INFO [StoreOpener-4825f6265c91ad571de6370b4fecbf02-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4825f6265c91ad571de6370b4fecbf02 columnFamilyName l 2024-12-13T07:33:06,631 DEBUG [StoreOpener-4825f6265c91ad571de6370b4fecbf02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:06,632 INFO [StoreOpener-4825f6265c91ad571de6370b4fecbf02-1 {}] regionserver.HStore(327): Store=4825f6265c91ad571de6370b4fecbf02/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:06,634 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/acl/4825f6265c91ad571de6370b4fecbf02 2024-12-13T07:33:06,635 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/acl/4825f6265c91ad571de6370b4fecbf02 2024-12-13T07:33:06,638 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] 
regionserver.HRegion(1085): writing seq id for 4825f6265c91ad571de6370b4fecbf02 2024-12-13T07:33:06,642 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/acl/4825f6265c91ad571de6370b4fecbf02/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:06,643 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1102): Opened 4825f6265c91ad571de6370b4fecbf02; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70122080, jitterRate=0.04490041732788086}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:33:06,646 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 4825f6265c91ad571de6370b4fecbf02: 2024-12-13T07:33:06,652 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02., pid=11, masterSystemTime=1734075186582 2024-12-13T07:33:06,658 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=4825f6265c91ad571de6370b4fecbf02, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:06,659 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 2024-12-13T07:33:06,659 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(164): Opened hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 
2024-12-13T07:33:06,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-13T07:33:06,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 4825f6265c91ad571de6370b4fecbf02, server=9eb75de94ef3,41895,1734075182430 in 262 msec 2024-12-13T07:33:06,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-13T07:33:06,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=hbase:acl, region=4825f6265c91ad571de6370b4fecbf02, ASSIGN in 432 msec 2024-12-13T07:33:06,686 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:33:06,686 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075186686"}]},"ts":"1734075186686"} 2024-12-13T07:33:06,689 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-13T07:33:06,736 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:33:06,739 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T07:33:06,741 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=hbase:acl in 634 msec 2024-12-13T07:33:07,239 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T07:33:07,240 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: hbase:acl, procId: 9 completed 2024-12-13T07:33:07,260 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-13T07:33:07,262 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-13T07:33:07,262 INFO [master/9eb75de94ef3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9eb75de94ef3,33713,1734075181583-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T07:33:07,963 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:33:08,081 WARN [Thread-398 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:33:08,338 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T07:33:08,338 WARN [Thread-398 {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-13T07:33:08,340 INFO [Thread-398 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T07:33:08,360 INFO [Thread-398 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T07:33:08,360 INFO [Thread-398 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T07:33:08,360 INFO [Thread-398 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T07:33:08,363 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T07:33:08,363 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T07:33:08,364 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T07:33:08,364 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@793676f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,AVAILABLE} 2024-12-13T07:33:08,365 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d703f12{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-13T07:33:08,366 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:33:08,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@89a48b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,AVAILABLE} 2024-12-13T07:33:08,377 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a82b03d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-13T07:33:08,541 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class 2024-12-13T07:33:08,542 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class 2024-12-13T07:33:08,542 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-13T07:33:08,545 INFO [Thread-398 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-13T07:33:08,609 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T07:33:08,844 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T07:33:09,207 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest" 2024-12-13T07:33:09,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e6298f3{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/java.io.tmpdir/jetty-localhost-41897-hadoop-yarn-common-3_4_1_jar-_-any-3021865522062857377/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-13T07:33:09,239 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6641ae5{HTTP/1.1, (http/1.1)}{localhost:41897} 2024-12-13T07:33:09,239 INFO [Time-limited test {}] server.Server(415): Started @15727ms 2024-12-13T07:33:09,243 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47a75dcc{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/java.io.tmpdir/jetty-localhost-40585-hadoop-yarn-common-3_4_1_jar-_-any-5613873669691325987/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-13T07:33:09,244 INFO [Thread-398 {}] server.AbstractConnector(333): Started 
ServerConnector@aa33399{HTTP/1.1, (http/1.1)}{localhost:40585} 2024-12-13T07:33:09,244 INFO [Thread-398 {}] server.Server(415): Started @15732ms 2024-12-13T07:33:09,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741843_1019 (size=5) 2024-12-13T07:33:09,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741843_1019 (size=5) 2024-12-13T07:33:09,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741843_1019 (size=5) 2024-12-13T07:33:10,255 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-13T07:33:10,262 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:33:10,291 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-13T07:33:10,291 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T07:33:10,299 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T07:33:10,299 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T07:33:10,300 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T07:33:10,303 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:33:10,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@600ff4a6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,AVAILABLE} 2024-12-13T07:33:10,305 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10274189{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-13T07:33:10,357 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-13T07:33:10,357 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-13T07:33:10,357 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-13T07:33:10,358 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-13T07:33:10,367 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T07:33:10,387 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T07:33:10,590 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T07:33:10,613 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:33:10,623 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5db52259{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/java.io.tmpdir/jetty-localhost-39929-hadoop-yarn-common-3_4_1_jar-_-any-16360913035313642865/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-13T07:33:10,648 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58d59e3{HTTP/1.1, (http/1.1)}{localhost:39929} 2024-12-13T07:33:10,650 INFO [Time-limited test {}] server.Server(415): Started @17138ms 2024-12-13T07:33:10,758 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-13T07:33:10,760 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-13T07:33:10,761 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): 
Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-13T07:33:10,971 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-13T07:33:10,974 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:33:10,989 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-13T07:33:10,989 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T07:33:10,996 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T07:33:10,996 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T07:33:10,996 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T07:33:10,996 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T07:33:10,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c9562ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,AVAILABLE} 2024-12-13T07:33:10,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f71bc03{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-13T07:33:11,052 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-13T07:33:11,052 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-13T07:33:11,052 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-13T07:33:11,052 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-13T07:33:11,062 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T07:33:11,071 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T07:33:11,203 INFO [Time-limited test {}] 
container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T07:33:11,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@826335e{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/java.io.tmpdir/jetty-localhost-45607-hadoop-yarn-common-3_4_1_jar-_-any-4205885978029962179/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-13T07:33:11,209 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54584004{HTTP/1.1, (http/1.1)}{localhost:45607} 2024-12-13T07:33:11,209 INFO [Time-limited test {}] server.Server(415): Started @17697ms 2024-12-13T07:33:11,251 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2825): Mini mapreduce cluster started 2024-12-13T07:33:11,253 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:11,315 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=724, OpenFileDescriptor=783, MaxFileDescriptor=1048576, SystemLoadAverage=566, ProcessCount=11, AvailableMemoryMB=9155 2024-12-13T07:33:11,315 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=724 is superior to 500 2024-12-13T07:33:11,329 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-13T07:33:11,340 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48840, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-13T07:33:11,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:33:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-13T07:33:11,359 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:33:11,359 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:11,362 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 12 2024-12-13T07:33:11,362 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, 
state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:33:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T07:33:11,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741844_1020 (size=406) 2024-12-13T07:33:11,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741844_1020 (size=406) 2024-12-13T07:33:11,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741844_1020 (size=406) 2024-12-13T07:33:11,428 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9fbb3809c42d8083292c98070b60fdcb, NAME => 'testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:11,430 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => c0e1eccb42c55f51c6e5369f86ec45ca, NAME => 'testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:11,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741845_1021 (size=67) 2024-12-13T07:33:11,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741845_1021 (size=67) 2024-12-13T07:33:11,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741845_1021 (size=67) 2024-12-13T07:33:11,470 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:11,470 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] 
regionserver.HRegion(1681): Closing 9fbb3809c42d8083292c98070b60fdcb, disabling compactions & flushes 2024-12-13T07:33:11,470 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:11,470 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:11,470 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. after waiting 0 ms 2024-12-13T07:33:11,470 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:11,470 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:11,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T07:33:11,470 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9fbb3809c42d8083292c98070b60fdcb: 2024-12-13T07:33:11,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741846_1022 (size=67) 2024-12-13T07:33:11,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741846_1022 (size=67) 2024-12-13T07:33:11,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741846_1022 (size=67) 2024-12-13T07:33:11,474 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:11,474 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1681): Closing c0e1eccb42c55f51c6e5369f86ec45ca, disabling compactions & flushes 2024-12-13T07:33:11,474 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:11,474 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:11,475 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 
after waiting 0 ms 2024-12-13T07:33:11,475 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:11,475 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:11,475 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1635): Region close journal for c0e1eccb42c55f51c6e5369f86ec45ca: 2024-12-13T07:33:11,477 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:33:11,478 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734075191478"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075191478"}]},"ts":"1734075191478"} 2024-12-13T07:33:11,478 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734075191478"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075191478"}]},"ts":"1734075191478"} 2024-12-13T07:33:11,517 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T07:33:11,519 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:33:11,519 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075191519"}]},"ts":"1734075191519"} 2024-12-13T07:33:11,521 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-13T07:33:11,565 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:33:11,568 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:33:11,568 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:33:11,568 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:33:11,568 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:33:11,569 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:33:11,569 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:33:11,569 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:33:11,569 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, 
region=9fbb3809c42d8083292c98070b60fdcb, ASSIGN}, {pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c0e1eccb42c55f51c6e5369f86ec45ca, ASSIGN}] 2024-12-13T07:33:11,572 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c0e1eccb42c55f51c6e5369f86ec45ca, ASSIGN 2024-12-13T07:33:11,573 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=9fbb3809c42d8083292c98070b60fdcb, ASSIGN 2024-12-13T07:33:11,574 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c0e1eccb42c55f51c6e5369f86ec45ca, ASSIGN; state=OFFLINE, location=9eb75de94ef3,36021,1734075182342; forceNewPlan=false, retain=false 2024-12-13T07:33:11,575 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=9fbb3809c42d8083292c98070b60fdcb, ASSIGN; state=OFFLINE, location=9eb75de94ef3,41895,1734075182430; forceNewPlan=false, retain=false 2024-12-13T07:33:11,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T07:33:11,725 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:33:11,725 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=c0e1eccb42c55f51c6e5369f86ec45ca, regionState=OPENING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:11,725 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=9fbb3809c42d8083292c98070b60fdcb, regionState=OPENING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:11,728 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; OpenRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:33:11,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE; OpenRegionProcedure 9fbb3809c42d8083292c98070b60fdcb, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:33:11,881 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:11,881 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T07:33:11,883 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:11,904 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 
2024-12-13T07:33:11,904 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7285): Opening region: {ENCODED => 9fbb3809c42d8083292c98070b60fdcb, NAME => 'testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T07:33:11,905 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. service=AccessControlService 2024-12-13T07:33:11,905 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T07:33:11,905 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:33:11,905 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:11,906 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:11,906 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7327): checking encryption for 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:11,906 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7330): checking classloading for 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:11,931 INFO [StoreOpener-9fbb3809c42d8083292c98070b60fdcb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:11,935 INFO [StoreOpener-9fbb3809c42d8083292c98070b60fdcb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9fbb3809c42d8083292c98070b60fdcb columnFamilyName cf 2024-12-13T07:33:11,935 DEBUG [StoreOpener-9fbb3809c42d8083292c98070b60fdcb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:11,935 INFO 
[RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:11,936 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7285): Opening region: {ENCODED => c0e1eccb42c55f51c6e5369f86ec45ca, NAME => 'testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T07:33:11,936 INFO [StoreOpener-9fbb3809c42d8083292c98070b60fdcb-1 {}] regionserver.HStore(327): Store=9fbb3809c42d8083292c98070b60fdcb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:11,936 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. service=AccessControlService 2024-12-13T07:33:11,937 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:33:11,937 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:11,937 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:11,937 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7327): checking encryption for c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:11,937 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7330): checking classloading for c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:11,937 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:11,938 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:11,945 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1085): writing seq id for 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:11,953 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:11,954 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1102): Opened 9fbb3809c42d8083292c98070b60fdcb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59882316, jitterRate=-0.10768395662307739}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:33:11,956 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1001): Region open journal for 9fbb3809c42d8083292c98070b60fdcb: 2024-12-13T07:33:11,959 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb., pid=16, masterSystemTime=1734075191883 2024-12-13T07:33:11,963 INFO [StoreOpener-c0e1eccb42c55f51c6e5369f86ec45ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:11,966 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=9fbb3809c42d8083292c98070b60fdcb, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:11,966 INFO [StoreOpener-c0e1eccb42c55f51c6e5369f86ec45ca-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c0e1eccb42c55f51c6e5369f86ec45ca columnFamilyName cf 2024-12-13T07:33:11,967 DEBUG [StoreOpener-c0e1eccb42c55f51c6e5369f86ec45ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:11,969 INFO [StoreOpener-c0e1eccb42c55f51c6e5369f86ec45ca-1 {}] regionserver.HStore(327): Store=c0e1eccb42c55f51c6e5369f86ec45ca/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:11,975 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:11,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=12 2024-12-13T07:33:11,976 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:11,977 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:11,977 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:11,977 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=13 2024-12-13T07:33:11,977 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=13, state=SUCCESS; OpenRegionProcedure 9fbb3809c42d8083292c98070b60fdcb, server=9eb75de94ef3,41895,1734075182430 in 241 msec 2024-12-13T07:33:11,982 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=9fbb3809c42d8083292c98070b60fdcb, ASSIGN in 408 msec 2024-12-13T07:33:11,984 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1085): writing seq id for c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:11,997 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:11,998 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1102): Opened c0e1eccb42c55f51c6e5369f86ec45ca; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59802880, jitterRate=-0.10886764526367188}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:33:11,999 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1001): Region open journal for c0e1eccb42c55f51c6e5369f86ec45ca: 2024-12-13T07:33:12,000 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca., pid=15, masterSystemTime=1734075191880 2024-12-13T07:33:12,004 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:12,004 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 
2024-12-13T07:33:12,006 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=c0e1eccb42c55f51c6e5369f86ec45ca, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:12,015 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-13T07:33:12,017 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; OpenRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca, server=9eb75de94ef3,36021,1734075182342 in 283 msec 2024-12-13T07:33:12,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=12 2024-12-13T07:33:12,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c0e1eccb42c55f51c6e5369f86ec45ca, ASSIGN in 446 msec 2024-12-13T07:33:12,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:33:12,022 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:33:12,022 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-13T07:33:12,022 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075192022"}]},"ts":"1734075192022"} 2024-12-13T07:33:12,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-13T07:33:12,022 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-13T07:33:12,024 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:33:12,025 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-13T07:33:12,025 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-13T07:33:12,025 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-13T07:33:12,028 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-13T07:33:12,029 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-13T07:33:12,029 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-13T07:33:12,031 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-13T07:33:12,031 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-13T07:33:12,032 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:33:12,033 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-13T07:33:12,033 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-13T07:33:12,033 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-13T07:33:12,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-13T07:33:12,034 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-13T07:33:12,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-13T07:33:12,034 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-13T07:33:12,066 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:33:12,071 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-13T07:33:12,090 DEBUG [hconnection-0x26925e55-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:12,115 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32976, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-13T07:33:12,124 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv 
[jenkins: RWXCA] 2024-12-13T07:33:12,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:12,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:12,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:12,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:12,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:12,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:12,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:12,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:33:12,256 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:12,256 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:12,256 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:12,256 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:12,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithTargetName in 902 msec 2024-12-13T07:33:12,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=12 2024-12-13T07:33:12,479 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName, procId: 12 completed 2024-12-13T07:33:12,479 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-13T07:33:12,479 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:12,485 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-13T07:33:12,486 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:12,486 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithTargetName assigned. 2024-12-13T07:33:12,500 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-13T07:33:12,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075192500 (current time:1734075192500). 2024-12-13T07:33:12,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:33:12,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-13T07:33:12,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:33:12,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46c3fe4f to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@503d98d3 2024-12-13T07:33:12,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@644d5f72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:12,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:12,521 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:12,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46c3fe4f to 127.0.0.1:59810 2024-12-13T07:33:12,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:12,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6048fd7f to 
127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7bccc802 2024-12-13T07:33:12,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d024e50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:12,544 DEBUG [hconnection-0x15520dca-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:12,546 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33000, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:12,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:12,551 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:12,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6048fd7f to 127.0.0.1:59810 2024-12-13T07:33:12,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:12,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-13T07:33:12,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
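The request being handled above ({ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }) is what the Admin client API produces when the test asks for a flush-type snapshot. A minimal client-side sketch, assuming a standard hbase-site.xml on the classpath; the connection setup here is illustrative, not taken from the test source:

// Sketch only: how a client typically requests a FLUSH-type snapshot like the one
// being registered above. Snapshot/table names mirror the log; everything else is assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side snapshot procedure completes; the repeated
      // "Checking to see if procedure is done" entries below are that wait.
      admin.snapshot("emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
    }
  }
}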
2024-12-13T07:33:12,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=17, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-13T07:33:12,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-13T07:33:12,579 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:33:12,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-13T07:33:12,584 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:33:12,595 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:33:12,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741847_1023 (size=167) 2024-12-13T07:33:12,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741847_1023 (size=167) 2024-12-13T07:33:12,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741847_1023 (size=167) 2024-12-13T07:33:12,608 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:33:12,611 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 9fbb3809c42d8083292c98070b60fdcb}, {pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca}] 2024-12-13T07:33:12,616 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:12,616 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:12,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=17 2024-12-13T07:33:12,773 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:12,773 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:12,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=19 2024-12-13T07:33:12,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=18 2024-12-13T07:33:12,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:12,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:12,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for c0e1eccb42c55f51c6e5369f86ec45ca: 2024-12-13T07:33:12,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.HRegion(2538): Flush status journal for 9fbb3809c42d8083292c98070b60fdcb: 2024-12-13T07:33:12,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. for emptySnaptb0-testExportWithTargetName completed. 2024-12-13T07:33:12,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. for emptySnaptb0-testExportWithTargetName completed. 2024-12-13T07:33:12,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-13T07:33:12,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-13T07:33:12,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:12,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:12,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:33:12,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:33:12,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741848_1024 (size=70) 2024-12-13T07:33:12,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741848_1024 (size=70) 2024-12-13T07:33:12,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741848_1024 (size=70) 2024-12-13T07:33:12,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:12,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=18 2024-12-13T07:33:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=18 2024-12-13T07:33:12,817 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:12,818 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:12,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741849_1025 (size=70) 2024-12-13T07:33:12,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741849_1025 (size=70) 2024-12-13T07:33:12,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741849_1025 (size=70) 2024-12-13T07:33:12,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 
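While the synchronous Admin.snapshot() call hides it, the non-blocking variant makes the polling explicit, which is what the repeated "Checking to see if procedure is done pid=17" entries correspond to on the master side. A sketch under the assumption of recent 2.x Admin signatures (snapshotAsync/isSnapshotFinished), not code from the test itself:

// Sketch of the non-blocking variant; the master answers exactly this kind of
// completion check while the SnapshotProcedure runs through its states above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class WaitForSnapshot {
  static void snapshotWithoutBlocking(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "emptySnaptb0-testExportWithTargetName",
        TableName.valueOf("testtb-testExportWithTargetName"));
    admin.snapshotAsync(desc);                 // submits the snapshot procedure and returns
    while (!admin.isSnapshotFinished(desc)) {  // poll the master until the procedure succeeds
      Thread.sleep(200);
    }
  }
}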
2024-12-13T07:33:12,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-13T07:33:12,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=17, state=SUCCESS; SnapshotRegionProcedure 9fbb3809c42d8083292c98070b60fdcb in 211 msec 2024-12-13T07:33:12,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-13T07:33:12,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:12,826 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:12,832 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=17 2024-12-13T07:33:12,832 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:33:12,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=17, state=SUCCESS; SnapshotRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca in 217 msec 2024-12-13T07:33:12,835 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:33:12,840 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:33:12,840 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-13T07:33:12,844 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-13T07:33:12,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-13T07:33:12,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741850_1026 (size=549) 2024-12-13T07:33:12,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741850_1026 (size=549) 2024-12-13T07:33:12,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741850_1026 (size=549) 2024-12-13T07:33:12,900 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:33:12,916 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:33:12,917 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-13T07:33:12,922 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:33:12,922 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-13T07:33:12,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 348 msec 2024-12-13T07:33:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-13T07:33:13,186 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 17 completed 2024-12-13T07:33:13,209 DEBUG [htable-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:13,209 DEBUG [htable-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:13,211 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54882, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:13,211 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:13,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41895 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:33:13,213 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. with WAL disabled. 
Data may be lost in the event of a crash. 2024-12-13T07:33:13,220 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithTargetName 2024-12-13T07:33:13,221 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:13,222 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:13,261 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-13T07:33:13,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075193261 (current time:1734075193261). 2024-12-13T07:33:13,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:33:13,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-13T07:33:13,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:33:13,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6fd57777 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@480596a2 2024-12-13T07:33:13,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc619b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:13,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:13,279 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6fd57777 to 127.0.0.1:59810 2024-12-13T07:33:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:13,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70fc71b8 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14284983 2024-12-13T07:33:13,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@398e9967, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:13,303 DEBUG [hconnection-0x3b6eeaca-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:13,305 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33020, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:13,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:13,314 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54890, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70fc71b8 to 127.0.0.1:59810 2024-12-13T07:33:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:13,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-13T07:33:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:33:13,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-13T07:33:13,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-13T07:33:13,324 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:33:13,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T07:33:13,325 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:33:13,330 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:33:13,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741851_1027 (size=162) 2024-12-13T07:33:13,363 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741851_1027 (size=162) 2024-12-13T07:33:13,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741851_1027 (size=162) 2024-12-13T07:33:13,366 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:33:13,366 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 9fbb3809c42d8083292c98070b60fdcb}, {pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca}] 2024-12-13T07:33:13,368 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:13,368 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T07:33:13,520 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:13,520 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:13,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=21 2024-12-13T07:33:13,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=22 2024-12-13T07:33:13,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:13,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 
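The "writing data to region ... with WAL disabled" entries a little earlier correspond to the test loading rows before this second snapshot; a write only skips the WAL when the client requests it. A minimal sketch of such a write, with a hypothetical row and value (the actual keys, such as 02f478f27e03185439ad735089f3cdb8 in the flush below, come from the test's own data generator):

// Sketch of a WAL-skipping write like the ones noted above; table/family names
// mirror the test, the row key and value here are hypothetical.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadRowsSkipWal {
  static void writeRow(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
      Put put = new Put(Bytes.toBytes("example-row"))          // hypothetical row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),  // 'cf' and 'q' as seen in the flushed cells
              Bytes.toBytes("example-value"));
      put.setDurability(Durability.SKIP_WAL);                  // ask the server to skip the WAL,
      table.put(put);                                          // which it notes as "WAL disabled" above
    }
  }
}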
2024-12-13T07:33:13,522 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 9fbb3809c42d8083292c98070b60fdcb 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-13T07:33:13,523 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2837): Flushing c0e1eccb42c55f51c6e5369f86ec45ca 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-13T07:33:13,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/.tmp/cf/6c56e352bbd1450a82a20a94f180af2f is 71, key is 02f478f27e03185439ad735089f3cdb8/cf:q/1734075193212/Put/seqid=0 2024-12-13T07:33:13,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/.tmp/cf/1348d3002acc4f7a90af87e5eaaaab05 is 71, key is 22a866076724dba0b3982c4b37959409/cf:q/1734075193213/Put/seqid=0 2024-12-13T07:33:13,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T07:33:13,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741852_1028 (size=5288) 2024-12-13T07:33:13,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741852_1028 (size=5288) 2024-12-13T07:33:13,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741852_1028 (size=5288) 2024-12-13T07:33:13,644 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/.tmp/cf/6c56e352bbd1450a82a20a94f180af2f 2024-12-13T07:33:13,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741853_1029 (size=8326) 2024-12-13T07:33:13,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741853_1029 (size=8326) 2024-12-13T07:33:13,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741853_1029 (size=8326) 2024-12-13T07:33:13,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/.tmp/cf/6c56e352bbd1450a82a20a94f180af2f as 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/cf/6c56e352bbd1450a82a20a94f180af2f 2024-12-13T07:33:13,754 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/cf/6c56e352bbd1450a82a20a94f180af2f, entries=3, sequenceid=6, filesize=5.2 K 2024-12-13T07:33:13,759 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 9fbb3809c42d8083292c98070b60fdcb in 236ms, sequenceid=6, compaction requested=false 2024-12-13T07:33:13,759 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-13T07:33:13,760 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 9fbb3809c42d8083292c98070b60fdcb: 2024-12-13T07:33:13,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. for snaptb0-testExportWithTargetName completed. 2024-12-13T07:33:13,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-13T07:33:13,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:13,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/cf/6c56e352bbd1450a82a20a94f180af2f] hfiles 2024-12-13T07:33:13,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/cf/6c56e352bbd1450a82a20a94f180af2f for snapshot=snaptb0-testExportWithTargetName 2024-12-13T07:33:13,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741854_1030 (size=109) 2024-12-13T07:33:13,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741854_1030 (size=109) 2024-12-13T07:33:13,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741854_1030 (size=109) 2024-12-13T07:33:13,798 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 
2024-12-13T07:33:13,798 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-13T07:33:13,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-13T07:33:13,799 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:13,799 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:13,805 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; SnapshotRegionProcedure 9fbb3809c42d8083292c98070b60fdcb in 435 msec 2024-12-13T07:33:13,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T07:33:14,045 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/.tmp/cf/1348d3002acc4f7a90af87e5eaaaab05 2024-12-13T07:33:14,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/.tmp/cf/1348d3002acc4f7a90af87e5eaaaab05 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/cf/1348d3002acc4f7a90af87e5eaaaab05 2024-12-13T07:33:14,079 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/cf/1348d3002acc4f7a90af87e5eaaaab05, entries=47, sequenceid=6, filesize=8.1 K 2024-12-13T07:33:14,080 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for c0e1eccb42c55f51c6e5369f86ec45ca in 558ms, sequenceid=6, compaction requested=false 2024-12-13T07:33:14,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2538): Flush status journal for c0e1eccb42c55f51c6e5369f86ec45ca: 2024-12-13T07:33:14,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. for snaptb0-testExportWithTargetName completed. 
2024-12-13T07:33:14,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-13T07:33:14,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:14,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/cf/1348d3002acc4f7a90af87e5eaaaab05] hfiles 2024-12-13T07:33:14,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/cf/1348d3002acc4f7a90af87e5eaaaab05 for snapshot=snaptb0-testExportWithTargetName 2024-12-13T07:33:14,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741855_1031 (size=109) 2024-12-13T07:33:14,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741855_1031 (size=109) 2024-12-13T07:33:14,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741855_1031 (size=109) 2024-12-13T07:33:14,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 
2024-12-13T07:33:14,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=22 2024-12-13T07:33:14,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=22 2024-12-13T07:33:14,093 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:14,093 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:14,097 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=22, resume processing ppid=20 2024-12-13T07:33:14,097 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:33:14,097 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, ppid=20, state=SUCCESS; SnapshotRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca in 728 msec 2024-12-13T07:33:14,099 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:33:14,100 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:33:14,100 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-13T07:33:14,102 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-13T07:33:14,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741856_1032 (size=627) 2024-12-13T07:33:14,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741856_1032 (size=627) 2024-12-13T07:33:14,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741856_1032 (size=627) 2024-12-13T07:33:14,127 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:33:14,138 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:33:14,139 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-13T07:33:14,142 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:33:14,143 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-13T07:33:14,146 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 824 msec 2024-12-13T07:33:14,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T07:33:14,434 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 20 completed 2024-12-13T07:33:14,434 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075194434 2024-12-13T07:33:14,434 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41477, tgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075194434, rawTgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075194434, srcFsUri=hdfs://localhost:41477, srcDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:14,491 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41477, inputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:14,491 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075194434, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075194434/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-13T07:33:14,497 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
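The export that follows is driven by the ExportSnapshot tool; the test supplies a target name (testExportWithTargetName) different from the source snapshot name (snaptb0-testExportWithTargetName), which is why initialOutputSnapshotDir above ends in .tmp/testExportWithTargetName. A sketch of how the tool is typically driven; the option names are the tool's usual CLI surface and the destination path mirrors the log, but the exact invocation inside the test is not reproduced here:

// Sketch of driving the snapshot export shown below via ToolRunner.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",   // source snapshot name
        "-target", "testExportWithTargetName",             // name the snapshot gets at the destination
        "-copy-to", "hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075194434",
        "-mappers", "1"
    });
    System.exit(rc);
  }
}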
2024-12-13T07:33:14,506 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075194434/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-13T07:33:14,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741857_1033 (size=162) 2024-12-13T07:33:14,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741857_1033 (size=162) 2024-12-13T07:33:14,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741857_1033 (size=162) 2024-12-13T07:33:14,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741858_1034 (size=627) 2024-12-13T07:33:14,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741858_1034 (size=627) 2024-12-13T07:33:14,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741858_1034 (size=627) 2024-12-13T07:33:14,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741859_1035 (size=154) 2024-12-13T07:33:14,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741859_1035 (size=154) 2024-12-13T07:33:14,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741859_1035 (size=154) 2024-12-13T07:33:14,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:14,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:14,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:14,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:15,543 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-920987146039340476.jar 2024-12-13T07:33:15,544 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using 
jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:15,544 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:15,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-636778726819947132.jar 2024-12-13T07:33:15,632 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:15,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:15,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:15,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:15,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:15,635 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:15,635 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T07:33:15,635 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T07:33:15,636 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T07:33:15,636 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T07:33:15,637 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T07:33:15,637 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T07:33:15,638 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T07:33:15,638 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T07:33:15,638 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T07:33:15,639 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T07:33:15,639 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T07:33:15,640 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T07:33:15,642 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:33:15,643 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:33:15,643 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:33:15,644 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:33:15,644 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:33:15,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:33:15,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:33:15,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741860_1036 (size=127628) 2024-12-13T07:33:15,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741860_1036 (size=127628) 2024-12-13T07:33:15,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741860_1036 (size=127628) 2024-12-13T07:33:15,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741861_1037 (size=2172137) 2024-12-13T07:33:15,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741861_1037 (size=2172137) 2024-12-13T07:33:15,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741861_1037 (size=2172137) 2024-12-13T07:33:15,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741862_1038 (size=213228) 2024-12-13T07:33:15,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741862_1038 (size=213228) 2024-12-13T07:33:15,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741862_1038 (size=213228) 2024-12-13T07:33:15,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741863_1039 (size=1877034) 2024-12-13T07:33:16,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741863_1039 (size=1877034) 2024-12-13T07:33:16,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741863_1039 (size=1877034) 2024-12-13T07:33:16,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741864_1040 (size=533455) 
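[Editor's note] The "For class X, using jar Y" DEBUG lines above are emitted while the export job's classpath is assembled. A minimal sketch of the kind of client code that triggers this resolution is shown below; the Configuration, job name, and class names here are illustrative assumptions, not taken from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-sketch"); // hypothetical job name
    // Ships the HBase, ZooKeeper, protobuf, metrics, etc. jars with the job;
    // each jar resolved this way is logged as "For class X, using jar Y",
    // matching the DEBUG lines above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}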
2024-12-13T07:33:16,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741864_1040 (size=533455) 2024-12-13T07:33:16,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741864_1040 (size=533455) 2024-12-13T07:33:16,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741865_1041 (size=7280644) 2024-12-13T07:33:16,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741865_1041 (size=7280644) 2024-12-13T07:33:16,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741865_1041 (size=7280644) 2024-12-13T07:33:16,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741866_1042 (size=4188619) 2024-12-13T07:33:16,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741866_1042 (size=4188619) 2024-12-13T07:33:16,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741866_1042 (size=4188619) 2024-12-13T07:33:16,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741867_1043 (size=20406) 2024-12-13T07:33:16,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741867_1043 (size=20406) 2024-12-13T07:33:16,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741867_1043 (size=20406) 2024-12-13T07:33:16,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741868_1044 (size=75495) 2024-12-13T07:33:16,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741868_1044 (size=75495) 2024-12-13T07:33:16,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741868_1044 (size=75495) 2024-12-13T07:33:16,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741869_1045 (size=45609) 2024-12-13T07:33:16,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741869_1045 (size=45609) 2024-12-13T07:33:16,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741869_1045 (size=45609) 2024-12-13T07:33:16,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741870_1046 (size=110084) 2024-12-13T07:33:16,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741870_1046 (size=110084) 2024-12-13T07:33:16,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741870_1046 
(size=110084) 2024-12-13T07:33:16,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741871_1047 (size=451756) 2024-12-13T07:33:16,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741871_1047 (size=451756) 2024-12-13T07:33:16,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741871_1047 (size=451756) 2024-12-13T07:33:16,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741872_1048 (size=6350899) 2024-12-13T07:33:16,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741872_1048 (size=6350899) 2024-12-13T07:33:16,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741872_1048 (size=6350899) 2024-12-13T07:33:16,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741873_1049 (size=1323991) 2024-12-13T07:33:16,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741873_1049 (size=1323991) 2024-12-13T07:33:16,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741873_1049 (size=1323991) 2024-12-13T07:33:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741874_1050 (size=23076) 2024-12-13T07:33:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741874_1050 (size=23076) 2024-12-13T07:33:16,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741874_1050 (size=23076) 2024-12-13T07:33:16,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741875_1051 (size=126803) 2024-12-13T07:33:16,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741875_1051 (size=126803) 2024-12-13T07:33:16,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741875_1051 (size=126803) 2024-12-13T07:33:16,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741876_1052 (size=322274) 2024-12-13T07:33:16,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741876_1052 (size=322274) 2024-12-13T07:33:16,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741876_1052 (size=322274) 2024-12-13T07:33:16,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741877_1053 (size=1832290) 2024-12-13T07:33:16,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to 
blk_1073741877_1053 (size=1832290) 2024-12-13T07:33:16,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741877_1053 (size=1832290) 2024-12-13T07:33:16,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741878_1054 (size=30081) 2024-12-13T07:33:16,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741878_1054 (size=30081) 2024-12-13T07:33:16,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741878_1054 (size=30081) 2024-12-13T07:33:16,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741879_1055 (size=53616) 2024-12-13T07:33:16,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741879_1055 (size=53616) 2024-12-13T07:33:16,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741879_1055 (size=53616) 2024-12-13T07:33:16,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741880_1056 (size=29229) 2024-12-13T07:33:16,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741880_1056 (size=29229) 2024-12-13T07:33:16,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741880_1056 (size=29229) 2024-12-13T07:33:16,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741881_1057 (size=169089) 2024-12-13T07:33:16,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741881_1057 (size=169089) 2024-12-13T07:33:16,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741881_1057 (size=169089) 2024-12-13T07:33:16,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741882_1058 (size=5175431) 2024-12-13T07:33:16,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741882_1058 (size=5175431) 2024-12-13T07:33:16,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741882_1058 (size=5175431) 2024-12-13T07:33:16,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741883_1059 (size=136454) 2024-12-13T07:33:16,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741883_1059 (size=136454) 2024-12-13T07:33:16,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741883_1059 (size=136454) 2024-12-13T07:33:16,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added 
to blk_1073741884_1060 (size=907854) 2024-12-13T07:33:16,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741884_1060 (size=907854) 2024-12-13T07:33:16,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741884_1060 (size=907854) 2024-12-13T07:33:16,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741885_1061 (size=3317408) 2024-12-13T07:33:16,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741885_1061 (size=3317408) 2024-12-13T07:33:16,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741885_1061 (size=3317408) 2024-12-13T07:33:16,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741886_1062 (size=503880) 2024-12-13T07:33:16,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741886_1062 (size=503880) 2024-12-13T07:33:16,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741886_1062 (size=503880) 2024-12-13T07:33:17,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741887_1063 (size=4695811) 2024-12-13T07:33:17,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741887_1063 (size=4695811) 2024-12-13T07:33:17,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741887_1063 (size=4695811) 2024-12-13T07:33:17,004 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
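[Editor's note] The JobResourceUploader warning just above ("No job jar file set. User classes may not be found.") points at Job#setJar(String). As a hedged illustration only (the driver class name below is made up for this sketch), submitting code usually silences it like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSketch {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "jar-sketch");
    // Either of these avoids the "No job jar file set" warning:
    job.setJarByClass(JobJarSketch.class);   // infer the jar from a class on the classpath
    // job.setJar("/path/to/your-job.jar");  // or point at an explicit jar path
  }
}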
2024-12-13T07:33:17,014 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-13T07:33:17,022 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T07:33:17,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741888_1064 (size=342) 2024-12-13T07:33:17,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741888_1064 (size=342) 2024-12-13T07:33:17,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741888_1064 (size=342) 2024-12-13T07:33:17,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741889_1065 (size=15) 2024-12-13T07:33:17,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741889_1065 (size=15) 2024-12-13T07:33:17,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741889_1065 (size=15) 2024-12-13T07:33:17,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741890_1066 (size=304882) 2024-12-13T07:33:17,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741890_1066 (size=304882) 2024-12-13T07:33:17,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741890_1066 (size=304882) 2024-12-13T07:33:17,410 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:33:17,782 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:33:17,783 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T07:33:17,948 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0001_000001 (auth:SIMPLE) from 127.0.0.1:53046 2024-12-13T07:33:25,309 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0001_000001 (auth:SIMPLE) from 127.0.0.1:42322 2024-12-13T07:33:25,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741891_1067 (size=350556) 2024-12-13T07:33:25,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741891_1067 (size=350556) 2024-12-13T07:33:25,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741891_1067 (size=350556) 2024-12-13T07:33:27,699 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0001_000001 (auth:SIMPLE) from 127.0.0.1:33052 2024-12-13T07:33:30,630 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T07:33:31,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741892_1068 (size=8326) 2024-12-13T07:33:31,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741892_1068 (size=8326) 2024-12-13T07:33:31,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741892_1068 (size=8326) 2024-12-13T07:33:31,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741893_1069 (size=5288) 2024-12-13T07:33:31,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741893_1069 (size=5288) 2024-12-13T07:33:31,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741893_1069 (size=5288) 2024-12-13T07:33:31,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741894_1070 (size=17419) 2024-12-13T07:33:31,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741894_1070 (size=17419) 2024-12-13T07:33:31,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741894_1070 (size=17419) 2024-12-13T07:33:31,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741895_1071 (size=464) 2024-12-13T07:33:31,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741895_1071 (size=464) 2024-12-13T07:33:31,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741895_1071 (size=464) 2024-12-13T07:33:31,659 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741896_1072 (size=17419) 2024-12-13T07:33:31,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741896_1072 (size=17419) 2024-12-13T07:33:31,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741896_1072 (size=17419) 2024-12-13T07:33:31,686 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0001/container_1734075189398_0001_01_000002/launch_container.sh] 2024-12-13T07:33:31,686 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0001/container_1734075189398_0001_01_000002/container_tokens] 2024-12-13T07:33:31,686 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0001/container_1734075189398_0001_01_000002/sysfs] 2024-12-13T07:33:31,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741897_1073 (size=350556) 2024-12-13T07:33:31,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741897_1073 (size=350556) 2024-12-13T07:33:31,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741897_1073 (size=350556) 2024-12-13T07:33:31,708 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0001_000001 (auth:SIMPLE) from 127.0.0.1:34558 2024-12-13T07:33:33,128 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T07:33:33,129 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
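[Editor's note] The ExportSnapshot messages around this point (loading the hfile list, a single 13.3 K split, "Finalize the Snapshot Export", then "Export Completed: testExportWithTargetName" just below) correspond to the tool's normal flow. A hedged sketch of how the tool is commonly driven follows; the destination URI is hypothetical and the option names should be checked against the ExportSnapshot usage output for the version in use.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Arguments mirror what this test exercises: export snaptb0-testExportWithTargetName
    // under a different target name. The copy-to path is an assumption for the sketch.
    int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
        new String[] {
            "-snapshot", "snaptb0-testExportWithTargetName",
            "-copy-to", "hdfs://namenode:8020/export-test",
            "-target", "testExportWithTargetName"
        });
    System.exit(rc);
  }
}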
2024-12-13T07:33:33,136 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: testExportWithTargetName 2024-12-13T07:33:33,137 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T07:33:33,137 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T07:33:33,138 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-13T07:33:33,138 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-13T07:33:33,138 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-13T07:33:33,138 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075194434/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075194434/.hbase-snapshot/testExportWithTargetName 2024-12-13T07:33:33,139 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075194434/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-13T07:33:33,139 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075194434/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-13T07:33:33,152 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithTargetName 2024-12-13T07:33:33,156 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-13T07:33:33,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=23, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-13T07:33:33,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-13T07:33:33,168 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075213168"}]},"ts":"1734075213168"} 2024-12-13T07:33:33,171 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-13T07:33:33,181 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithTargetName to state=DISABLING 2024-12-13T07:33:33,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-13T07:33:33,191 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=9fbb3809c42d8083292c98070b60fdcb, UNASSIGN}, {pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c0e1eccb42c55f51c6e5369f86ec45ca, UNASSIGN}] 2024-12-13T07:33:33,193 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=9fbb3809c42d8083292c98070b60fdcb, UNASSIGN 2024-12-13T07:33:33,193 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c0e1eccb42c55f51c6e5369f86ec45ca, UNASSIGN 2024-12-13T07:33:33,194 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=c0e1eccb42c55f51c6e5369f86ec45ca, regionState=CLOSING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:33,194 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=9fbb3809c42d8083292c98070b60fdcb, regionState=CLOSING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:33,196 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:33:33,197 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; CloseRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:33:33,197 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:33:33,201 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=25, state=RUNNABLE; CloseRegionProcedure 9fbb3809c42d8083292c98070b60fdcb, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:33:33,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-13T07:33:33,354 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:33,354 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:33,355 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(124): Close c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:33,355 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(124): Close 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:33,356 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(138): 
Unassign region: split region: false: evictCache: false 2024-12-13T07:33:33,356 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:33:33,357 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1681): Closing 9fbb3809c42d8083292c98070b60fdcb, disabling compactions & flushes 2024-12-13T07:33:33,357 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1681): Closing c0e1eccb42c55f51c6e5369f86ec45ca, disabling compactions & flushes 2024-12-13T07:33:33,357 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:33,357 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:33,357 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:33,357 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:33,357 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. after waiting 0 ms 2024-12-13T07:33:33,357 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. after waiting 0 ms 2024-12-13T07:33:33,357 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:33,357 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 
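[Editor's note] The DisableTableProcedure / TransitRegionStateProcedure / CloseRegionProcedure chain above is the master-side work done when a client asks to disable the table. A minimal client-side sketch, assuming connection details not shown in the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      // Blocks until the master's DisableTableProcedure (and its region-close
      // subprocedures, as logged above) has completed.
      admin.disableTable(table);
    }
  }
}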
2024-12-13T07:33:33,366 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:33:33,368 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:33:33,370 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:33:33,370 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:33:33,370 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca. 2024-12-13T07:33:33,370 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb. 2024-12-13T07:33:33,370 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1635): Region close journal for c0e1eccb42c55f51c6e5369f86ec45ca: 2024-12-13T07:33:33,370 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1635): Region close journal for 9fbb3809c42d8083292c98070b60fdcb: 2024-12-13T07:33:33,373 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(170): Closed c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:33,374 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=c0e1eccb42c55f51c6e5369f86ec45ca, regionState=CLOSED 2024-12-13T07:33:33,375 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(170): Closed 9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:33,376 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=9fbb3809c42d8083292c98070b60fdcb, regionState=CLOSED 2024-12-13T07:33:33,382 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-13T07:33:33,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=25 2024-12-13T07:33:33,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; CloseRegionProcedure c0e1eccb42c55f51c6e5369f86ec45ca, server=9eb75de94ef3,36021,1734075182342 in 181 msec 2024-12-13T07:33:33,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=25, state=SUCCESS; CloseRegionProcedure 9fbb3809c42d8083292c98070b60fdcb, server=9eb75de94ef3,41895,1734075182430 in 181 msec 2024-12-13T07:33:33,384 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=26, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c0e1eccb42c55f51c6e5369f86ec45ca, UNASSIGN in 191 msec 2024-12-13T07:33:33,388 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-13T07:33:33,388 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=9fbb3809c42d8083292c98070b60fdcb, UNASSIGN in 192 msec 2024-12-13T07:33:33,393 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=24, resume processing ppid=23 2024-12-13T07:33:33,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, ppid=23, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 207 msec 2024-12-13T07:33:33,398 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075213398"}]},"ts":"1734075213398"} 2024-12-13T07:33:33,401 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-13T07:33:33,406 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-13T07:33:33,410 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithTargetName in 250 msec 2024-12-13T07:33:33,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-13T07:33:33,471 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName, procId: 23 completed 2024-12-13T07:33:33,476 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-13T07:33:33,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T07:33:33,485 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T07:33:33,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-13T07:33:33,491 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=29, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T07:33:33,496 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-13T07:33:33,501 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:33,501 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:33,508 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/recovered.edits] 2024-12-13T07:33:33,508 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/recovered.edits] 2024-12-13T07:33:33,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-13T07:33:33,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-13T07:33:33,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-13T07:33:33,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-13T07:33:33,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-13T07:33:33,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-13T07:33:33,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:33,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:33,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testExportWithTargetName with data null 2024-12-13T07:33:33,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:33,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-13T07:33:33,523 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-13T07:33:33,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-13T07:33:33,523 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-13T07:33:33,523 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-13T07:33:33,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:33,525 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/cf/6c56e352bbd1450a82a20a94f180af2f to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/cf/6c56e352bbd1450a82a20a94f180af2f 2024-12-13T07:33:33,525 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/cf/1348d3002acc4f7a90af87e5eaaaab05 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/cf/1348d3002acc4f7a90af87e5eaaaab05 2024-12-13T07:33:33,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-13T07:33:33,538 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca/recovered.edits/9.seqid 2024-12-13T07:33:33,539 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/c0e1eccb42c55f51c6e5369f86ec45ca 2024-12-13T07:33:33,542 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb/recovered.edits/9.seqid 2024-12-13T07:33:33,543 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithTargetName/9fbb3809c42d8083292c98070b60fdcb 2024-12-13T07:33:33,543 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-13T07:33:33,546 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=29, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T07:33:33,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42189 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-13T07:33:33,555 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-13T07:33:33,561 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-13T07:33:33,565 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=29, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T07:33:33,565 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-13T07:33:33,565 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075213565"}]},"ts":"9223372036854775807"} 2024-12-13T07:33:33,566 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075213565"}]},"ts":"9223372036854775807"} 2024-12-13T07:33:33,569 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T07:33:33,569 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9fbb3809c42d8083292c98070b60fdcb, NAME => 'testtb-testExportWithTargetName,,1734075191353.9fbb3809c42d8083292c98070b60fdcb.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c0e1eccb42c55f51c6e5369f86ec45ca, NAME => 'testtb-testExportWithTargetName,1,1734075191353.c0e1eccb42c55f51c6e5369f86ec45ca.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T07:33:33,570 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithTargetName' as deleted. 
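[Editor's note] The DeleteTableProcedure lines above (archiving the region directories, removing rows from hbase:meta, dropping the descriptor) and the snapshot deletions recorded just below are the server side of the usual test teardown. A hedged client-side sketch of that cleanup, using the table and snapshot names that appear in this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TeardownSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      admin.deleteTable(table);  // table must already be disabled, as it is above
      // Drop the snapshots taken earlier in the test run.
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}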
2024-12-13T07:33:33,570 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734075213570"}]},"ts":"9223372036854775807"} 2024-12-13T07:33:33,574 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithTargetName state from META 2024-12-13T07:33:33,582 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=29, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T07:33:33,584 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithTargetName in 105 msec 2024-12-13T07:33:33,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-13T07:33:33,628 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName, procId: 29 completed 2024-12-13T07:33:33,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" 2024-12-13T07:33:33,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-13T07:33:33,646 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" 2024-12-13T07:33:33,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-13T07:33:33,671 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=777 (was 724) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) 
app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_585905401_1 at /127.0.0.1:34408 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:33844 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1658146201) connection to localhost/127.0.0.1:43561 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:34436 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1299 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:39694 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45875 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43103 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43561 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_585905401_1 at /127.0.0.1:39656 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1658146201) connection to localhost/127.0.0.1:43103 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 53955) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=803 (was 783) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=594 (was 566) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=7701 (was 9155) 2024-12-13T07:33:33,672 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=777 is superior to 500 2024-12-13T07:33:33,688 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=777, OpenFileDescriptor=803, MaxFileDescriptor=1048576, SystemLoadAverage=594, ProcessCount=17, AvailableMemoryMB=7695 2024-12-13T07:33:33,688 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=777 is superior to 500 2024-12-13T07:33:33,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:33:33,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T07:33:33,693 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:33:33,693 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:33,694 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 30 2024-12-13T07:33:33,694 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:33:33,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T07:33:33,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741898_1074 (size=404) 2024-12-13T07:33:33,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741898_1074 (size=404) 2024-12-13T07:33:33,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741898_1074 (size=404) 2024-12-13T07:33:33,706 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 42d35628cca5707cd4a5e6b8ee4a1b9e, NAME => 'testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:33,706 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 96c2377ea1a745dd079d620502e62feb, NAME => 'testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:33,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741900_1076 (size=65) 2024-12-13T07:33:33,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741900_1076 (size=65) 2024-12-13T07:33:33,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741899_1075 (size=65) 2024-12-13T07:33:33,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741899_1075 (size=65) 2024-12-13T07:33:33,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741899_1075 (size=65) 2024-12-13T07:33:33,716 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:33,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741900_1076 (size=65) 2024-12-13T07:33:33,716 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 96c2377ea1a745dd079d620502e62feb, disabling compactions & flushes 2024-12-13T07:33:33,716 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:33,716 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:33,716 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 
after waiting 0 ms 2024-12-13T07:33:33,716 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:33,716 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:33,716 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 96c2377ea1a745dd079d620502e62feb: 2024-12-13T07:33:33,785 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T07:33:33,786 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T07:33:33,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T07:33:33,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T07:33:34,117 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:34,117 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 42d35628cca5707cd4a5e6b8ee4a1b9e, disabling compactions & flushes 2024-12-13T07:33:34,117 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 2024-12-13T07:33:34,117 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 2024-12-13T07:33:34,117 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. after waiting 0 ms 2024-12-13T07:33:34,117 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 2024-12-13T07:33:34,117 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 
2024-12-13T07:33:34,118 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 42d35628cca5707cd4a5e6b8ee4a1b9e: 2024-12-13T07:33:34,119 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:33:34,120 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734075214120"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075214120"}]},"ts":"1734075214120"} 2024-12-13T07:33:34,120 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734075214120"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075214120"}]},"ts":"1734075214120"} 2024-12-13T07:33:34,128 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T07:33:34,131 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:33:34,131 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075214131"}]},"ts":"1734075214131"} 2024-12-13T07:33:34,133 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-13T07:33:34,164 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:33:34,166 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:33:34,166 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:33:34,166 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:33:34,166 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:33:34,166 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:33:34,166 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:33:34,167 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:33:34,167 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=42d35628cca5707cd4a5e6b8ee4a1b9e, ASSIGN}, {pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96c2377ea1a745dd079d620502e62feb, ASSIGN}] 2024-12-13T07:33:34,170 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, 
region=42d35628cca5707cd4a5e6b8ee4a1b9e, ASSIGN 2024-12-13T07:33:34,172 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96c2377ea1a745dd079d620502e62feb, ASSIGN 2024-12-13T07:33:34,173 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=42d35628cca5707cd4a5e6b8ee4a1b9e, ASSIGN; state=OFFLINE, location=9eb75de94ef3,36021,1734075182342; forceNewPlan=false, retain=false 2024-12-13T07:33:34,173 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96c2377ea1a745dd079d620502e62feb, ASSIGN; state=OFFLINE, location=9eb75de94ef3,41895,1734075182430; forceNewPlan=false, retain=false 2024-12-13T07:33:34,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T07:33:34,323 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:33:34,324 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=42d35628cca5707cd4a5e6b8ee4a1b9e, regionState=OPENING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:34,324 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=96c2377ea1a745dd079d620502e62feb, regionState=OPENING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:34,326 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; OpenRegionProcedure 96c2377ea1a745dd079d620502e62feb, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:33:34,330 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=31, state=RUNNABLE; OpenRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:33:34,486 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:34,488 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:34,490 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:34,491 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => 96c2377ea1a745dd079d620502e62feb, NAME => 'testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T07:33:34,491 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 
service=AccessControlService 2024-12-13T07:33:34,492 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:33:34,492 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:34,492 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:34,492 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:34,492 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:34,492 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 2024-12-13T07:33:34,493 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7285): Opening region: {ENCODED => 42d35628cca5707cd4a5e6b8ee4a1b9e, NAME => 'testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T07:33:34,493 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. service=AccessControlService 2024-12-13T07:33:34,494 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
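The AccessControlService registration above is the AccessController coprocessor being attached as each region opens; this is what later drives the PermissionStorage writes and the /hbase/acl ZooKeeper updates. A minimal Java sketch of the configuration that typically enables it on an HBase 2.x cluster (only the coprocessor class name comes from the log; the keys are the standard ones and the rest is assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureClusterConfSketch {
  public static Configuration secureConf() {
    Configuration conf = HBaseConfiguration.create();
    // Turn on authorization and register the AccessController on the master,
    // on each region server, and on every region (the service seen above).
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}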
2024-12-13T07:33:34,494 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:34,494 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:34,494 INFO [StoreOpener-96c2377ea1a745dd079d620502e62feb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:34,494 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7327): checking encryption for 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:34,494 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7330): checking classloading for 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:34,496 INFO [StoreOpener-96c2377ea1a745dd079d620502e62feb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96c2377ea1a745dd079d620502e62feb columnFamilyName cf 2024-12-13T07:33:34,497 DEBUG [StoreOpener-96c2377ea1a745dd079d620502e62feb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:34,498 INFO [StoreOpener-96c2377ea1a745dd079d620502e62feb-1 {}] regionserver.HStore(327): Store=96c2377ea1a745dd079d620502e62feb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:34,499 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:34,499 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:34,499 INFO [StoreOpener-42d35628cca5707cd4a5e6b8ee4a1b9e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:34,502 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:34,503 INFO [StoreOpener-42d35628cca5707cd4a5e6b8ee4a1b9e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 42d35628cca5707cd4a5e6b8ee4a1b9e columnFamilyName cf 2024-12-13T07:33:34,503 DEBUG [StoreOpener-42d35628cca5707cd4a5e6b8ee4a1b9e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:34,504 INFO [StoreOpener-42d35628cca5707cd4a5e6b8ee4a1b9e-1 {}] regionserver.HStore(327): Store=42d35628cca5707cd4a5e6b8ee4a1b9e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:34,505 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:34,505 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:34,505 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:34,506 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened 96c2377ea1a745dd079d620502e62feb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64290040, jitterRate=-0.042003750801086426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:33:34,508 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for 96c2377ea1a745dd079d620502e62feb: 2024-12-13T07:33:34,508 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1085): 
writing seq id for 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:34,510 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb., pid=33, masterSystemTime=1734075214485 2024-12-13T07:33:34,513 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:34,514 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:34,514 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=96c2377ea1a745dd079d620502e62feb, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:34,514 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:34,518 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1102): Opened 42d35628cca5707cd4a5e6b8ee4a1b9e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63842211, jitterRate=-0.04867692291736603}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:33:34,518 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1001): Region open journal for 42d35628cca5707cd4a5e6b8ee4a1b9e: 2024-12-13T07:33:34,520 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-13T07:33:34,519 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e., pid=34, masterSystemTime=1734075214488 2024-12-13T07:33:34,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; OpenRegionProcedure 96c2377ea1a745dd079d620502e62feb, server=9eb75de94ef3,41895,1734075182430 in 190 msec 2024-12-13T07:33:34,522 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 2024-12-13T07:33:34,522 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 
2024-12-13T07:33:34,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96c2377ea1a745dd079d620502e62feb, ASSIGN in 353 msec 2024-12-13T07:33:34,523 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=42d35628cca5707cd4a5e6b8ee4a1b9e, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:34,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=31 2024-12-13T07:33:34,530 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=31, state=SUCCESS; OpenRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e, server=9eb75de94ef3,36021,1734075182342 in 196 msec 2024-12-13T07:33:34,532 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-13T07:33:34,532 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=42d35628cca5707cd4a5e6b8ee4a1b9e, ASSIGN in 362 msec 2024-12-13T07:33:34,533 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:33:34,533 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075214533"}]},"ts":"1734075214533"} 2024-12-13T07:33:34,536 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-13T07:33:34,582 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:33:34,583 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-13T07:33:34,587 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-13T07:33:34,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:34,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:34,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:34,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:34,614 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T07:33:34,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T07:33:34,614 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:34,614 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:34,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:34,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:34,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithResetTtl in 924 msec 2024-12-13T07:33:34,757 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-13T07:33:34,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T07:33:34,801 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl, procId: 30 completed 2024-12-13T07:33:34,801 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-13T07:33:34,802 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:34,807 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-13T07:33:34,808 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:34,808 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithResetTtl assigned. 
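The CreateTableProcedure above (pid=30, with ASSIGN subprocedures 31 and 32) corresponds to a single client-side createTable call. A minimal sketch against the HBase 2.x Admin API, assuming a connection to this cluster; the single split key '1' is what produces the two regions ['', '1') and ['1', '') assigned above:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
      // Column family 'cf' with a single version, as in the descriptor the
      // master prints; one split key '1' yields the two regions assigned above.
      admin.createTable(
          TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build())
              .build(),
          new byte[][] { Bytes.toBytes("1") });
    }
  }
}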
2024-12-13T07:33:34,813 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T07:33:34,815 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T07:33:34,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-13T07:33:34,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075214816 (current time:1734075214816). 2024-12-13T07:33:34,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:33:34,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-13T07:33:34,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:33:34,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5193d3c5 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39211ceb 2024-12-13T07:33:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@582d214a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:34,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:34,846 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:34,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5193d3c5 to 127.0.0.1:59810 2024-12-13T07:33:34,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5808fcba to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69ff6514 2024-12-13T07:33:34,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58692e10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:34,884 DEBUG 
[hconnection-0x1fa74c80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:34,885 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:34,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:34,891 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52186, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:34,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5808fcba to 127.0.0.1:59810 2024-12-13T07:33:34,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:34,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-13T07:33:34,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:33:34,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-13T07:33:34,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-13T07:33:34,901 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:33:34,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-13T07:33:34,903 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:33:34,906 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:33:34,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741901_1077 (size=161) 2024-12-13T07:33:34,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is 
added to blk_1073741901_1077 (size=161) 2024-12-13T07:33:34,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741901_1077 (size=161) 2024-12-13T07:33:34,930 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:33:34,930 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e}, {pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 96c2377ea1a745dd079d620502e62feb}] 2024-12-13T07:33:34,932 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:34,932 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:35,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-13T07:33:35,086 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:35,086 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:35,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=37 2024-12-13T07:33:35,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=36 2024-12-13T07:33:35,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:35,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 2024-12-13T07:33:35,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.HRegion(2538): Flush status journal for 42d35628cca5707cd4a5e6b8ee4a1b9e: 2024-12-13T07:33:35,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for 96c2377ea1a745dd079d620502e62feb: 2024-12-13T07:33:35,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. for emptySnaptb0-testExportWithResetTtl completed. 
2024-12-13T07:33:35,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-13T07:33:35,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-13T07:33:35,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-13T07:33:35,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:35,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:35,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:33:35,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:33:35,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741903_1079 (size=68) 2024-12-13T07:33:35,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741903_1079 (size=68) 2024-12-13T07:33:35,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741903_1079 (size=68) 2024-12-13T07:33:35,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 
2024-12-13T07:33:35,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=36 2024-12-13T07:33:35,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=36 2024-12-13T07:33:35,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:35,101 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:35,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; SnapshotRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e in 172 msec 2024-12-13T07:33:35,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741902_1078 (size=68) 2024-12-13T07:33:35,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741902_1078 (size=68) 2024-12-13T07:33:35,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741902_1078 (size=68) 2024-12-13T07:33:35,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:35,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-12-13T07:33:35,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-12-13T07:33:35,114 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:35,114 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:35,117 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=35 2024-12-13T07:33:35,117 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:33:35,118 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=35, state=SUCCESS; SnapshotRegionProcedure 96c2377ea1a745dd079d620502e62feb in 185 msec 2024-12-13T07:33:35,118 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:33:35,120 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:33:35,120 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-13T07:33:35,121 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-13T07:33:35,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741904_1080 (size=543) 2024-12-13T07:33:35,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741904_1080 (size=543) 2024-12-13T07:33:35,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741904_1080 (size=543) 2024-12-13T07:33:35,140 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:33:35,148 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:33:35,148 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-13T07:33:35,150 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:33:35,151 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-13T07:33:35,152 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 256 msec 2024-12-13T07:33:35,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-13T07:33:35,205 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 35 completed 2024-12-13T07:33:35,216 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:33:35,218 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41895 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:33:35,224 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-13T07:33:35,225 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 2024-12-13T07:33:35,225 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:35,245 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-13T07:33:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075215245 (current time:1734075215245). 2024-12-13T07:33:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:33:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-13T07:33:35,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:33:35,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x031829d5 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@26feb67a 2024-12-13T07:33:35,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a709ed9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:35,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:35,506 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:35,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x031829d5 to 127.0.0.1:59810 2024-12-13T07:33:35,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:35,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x03950f5c to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@267431be 2024-12-13T07:33:35,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f8a61ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:35,534 DEBUG [hconnection-0x28745c20-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:35,535 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44394, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:35,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:35,538 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:35,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x03950f5c to 127.0.0.1:59810 2024-12-13T07:33:35,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:35,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-13T07:33:35,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
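The sequence logged above, and continuing below for pid=38, is: a FLUSH snapshot of the empty table (emptySnaptb0-testExportWithResetTtl, pid=35), a data load performed with the WAL disabled, then a second FLUSH snapshot of the populated table (snaptb0-testExportWithResetTtl). A rough client-side sketch of those calls; row keys, qualifiers, and values are placeholders, not taken from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SnapshotFlowSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      // Snapshot of the empty table (pid=35 above).
      admin.snapshot("emptySnaptb0-testExportWithResetTtl", table.getName(), SnapshotType.FLUSH);
      // Load a row without writing the WAL, matching the
      // "writing data ... with WAL disabled" messages in the log.
      Put put = new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
      // Snapshot of the populated table (pid=38, still executing below).
      admin.snapshot("snaptb0-testExportWithResetTtl", table.getName(), SnapshotType.FLUSH);
    }
  }
}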
2024-12-13T07:33:35,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-13T07:33:35,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-13T07:33:35,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T07:33:35,542 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:33:35,544 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:33:35,546 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:33:35,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741905_1081 (size=156) 2024-12-13T07:33:35,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741905_1081 (size=156) 2024-12-13T07:33:35,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741905_1081 (size=156) 2024-12-13T07:33:35,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T07:33:35,783 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T07:33:35,784 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47988, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T07:33:35,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T07:33:35,959 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:33:35,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; 
SnapshotRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e}, {pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 96c2377ea1a745dd079d620502e62feb}] 2024-12-13T07:33:35,960 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:35,960 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:36,111 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:36,111 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:36,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=40 2024-12-13T07:33:36,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=39 2024-12-13T07:33:36,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:36,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 
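
[Editor's note] While the SnapshotProcedure (pid=38) runs, the client repeatedly triggers the "Checking to see if procedure is done pid=38" round trips seen above; that polling happens inside HBaseAdmin. Roughly the same check is available directly through the public API, sketched below; the sleep interval is an arbitrary assumption.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotWaitSketch {
      // Poll the master until the snapshot has finished, mirroring the
      // "Checking to see if procedure is done" entries in the log above.
      static void waitForSnapshot(Admin admin, String snapshotName, TableName table)
          throws Exception {
        SnapshotDescription snap =
            new SnapshotDescription(snapshotName, table, SnapshotType.FLUSH);
        while (!admin.isSnapshotFinished(snap)) {
          Thread.sleep(200); // back-off between checks; interval is an assumption
        }
      }
    }
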
2024-12-13T07:33:36,113 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 96c2377ea1a745dd079d620502e62feb 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-13T07:33:36,113 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2837): Flushing 42d35628cca5707cd4a5e6b8ee4a1b9e 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-13T07:33:36,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/.tmp/cf/8ed2ab2721c7472c963eca6177b2b002 is 71, key is 0587aec6d2a62016058ee31065e547be/cf:q/1734075215216/Put/seqid=0 2024-12-13T07:33:36,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/.tmp/cf/a20984d8785847ef968bd7b51be1dcd6 is 71, key is 14febc1c341448d6a62e8c0f7a06948d/cf:q/1734075215218/Put/seqid=0 2024-12-13T07:33:36,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741906_1082 (size=5216) 2024-12-13T07:33:36,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741906_1082 (size=5216) 2024-12-13T07:33:36,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741906_1082 (size=5216) 2024-12-13T07:33:36,139 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/.tmp/cf/8ed2ab2721c7472c963eca6177b2b002 2024-12-13T07:33:36,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741907_1083 (size=8392) 2024-12-13T07:33:36,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741907_1083 (size=8392) 2024-12-13T07:33:36,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741907_1083 (size=8392) 2024-12-13T07:33:36,147 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/.tmp/cf/8ed2ab2721c7472c963eca6177b2b002 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/cf/8ed2ab2721c7472c963eca6177b2b002 2024-12-13T07:33:36,148 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T07:33:36,148 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/.tmp/cf/a20984d8785847ef968bd7b51be1dcd6 2024-12-13T07:33:36,156 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/cf/8ed2ab2721c7472c963eca6177b2b002, entries=2, sequenceid=6, filesize=5.1 K 2024-12-13T07:33:36,157 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 42d35628cca5707cd4a5e6b8ee4a1b9e in 44ms, sequenceid=6, compaction requested=false 2024-12-13T07:33:36,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/.tmp/cf/a20984d8785847ef968bd7b51be1dcd6 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/cf/a20984d8785847ef968bd7b51be1dcd6 2024-12-13T07:33:36,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2538): Flush status journal for 42d35628cca5707cd4a5e6b8ee4a1b9e: 2024-12-13T07:33:36,158 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. for snaptb0-testExportWithResetTtl completed. 2024-12-13T07:33:36,158 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-13T07:33:36,158 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:36,158 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/cf/8ed2ab2721c7472c963eca6177b2b002] hfiles 2024-12-13T07:33:36,158 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/cf/8ed2ab2721c7472c963eca6177b2b002 for snapshot=snaptb0-testExportWithResetTtl 2024-12-13T07:33:36,167 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/cf/a20984d8785847ef968bd7b51be1dcd6, entries=48, sequenceid=6, filesize=8.2 K 2024-12-13T07:33:36,168 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 96c2377ea1a745dd079d620502e62feb in 55ms, sequenceid=6, compaction requested=false 2024-12-13T07:33:36,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for 96c2377ea1a745dd079d620502e62feb: 2024-12-13T07:33:36,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. for snaptb0-testExportWithResetTtl completed. 2024-12-13T07:33:36,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-13T07:33:36,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:36,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/cf/a20984d8785847ef968bd7b51be1dcd6] hfiles 2024-12-13T07:33:36,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/cf/a20984d8785847ef968bd7b51be1dcd6 for snapshot=snaptb0-testExportWithResetTtl 2024-12-13T07:33:36,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741908_1084 (size=107) 2024-12-13T07:33:36,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741908_1084 (size=107) 2024-12-13T07:33:36,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741908_1084 (size=107) 2024-12-13T07:33:36,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 
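
[Editor's note] For this FLUSH-type snapshot, each region's memstore is flushed and the resulting store file committed before the manifest references are added, as the preceding entries show. The same per-region memstore flush can also be requested explicitly through the Admin API; a small sketch under that assumption, using the table name from this test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class FlushSketch {
      // Flush all regions of the table so memstore contents reach durable store files,
      // analogous to the per-region flush SnapshotRegionCallable performs here.
      static void flushTable(Admin admin) throws java.io.IOException {
        admin.flush(TableName.valueOf("testtb-testExportWithResetTtl"));
      }
    }
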
2024-12-13T07:33:36,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=39 2024-12-13T07:33:36,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=39 2024-12-13T07:33:36,179 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:36,180 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:36,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; SnapshotRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e in 221 msec 2024-12-13T07:33:36,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741909_1085 (size=107) 2024-12-13T07:33:36,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741909_1085 (size=107) 2024-12-13T07:33:36,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741909_1085 (size=107) 2024-12-13T07:33:36,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:36,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-13T07:33:36,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-13T07:33:36,191 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:36,191 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:36,195 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=38 2024-12-13T07:33:36,195 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:33:36,195 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; SnapshotRegionProcedure 96c2377ea1a745dd079d620502e62feb in 233 msec 2024-12-13T07:33:36,196 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:33:36,197 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:33:36,197 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-13T07:33:36,198 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-13T07:33:36,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741910_1086 (size=621) 2024-12-13T07:33:36,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741910_1086 (size=621) 2024-12-13T07:33:36,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741910_1086 (size=621) 2024-12-13T07:33:36,218 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:33:36,227 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:33:36,228 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-13T07:33:36,230 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:33:36,230 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-13T07:33:36,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 690 msec 2024-12-13T07:33:36,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T07:33:36,649 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table 
Name: default:testtb-testExportWithResetTtl, procId: 38 completed 2024-12-13T07:33:36,651 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:33:36,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportWithResetTtl 2024-12-13T07:33:36,653 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:33:36,653 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:36,653 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 41 2024-12-13T07:33:36,654 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:33:36,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-13T07:33:36,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741911_1087 (size=397) 2024-12-13T07:33:36,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741911_1087 (size=397) 2024-12-13T07:33:36,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741911_1087 (size=397) 2024-12-13T07:33:36,677 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6e0e38894b52df0e2e5917ba22ec2cc9, NAME => 'testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:36,678 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 3ff931d0ce225fde6a6203e81b6f91e7, NAME => 
'testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:36,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741912_1088 (size=58) 2024-12-13T07:33:36,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741912_1088 (size=58) 2024-12-13T07:33:36,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741912_1088 (size=58) 2024-12-13T07:33:36,691 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:36,691 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 6e0e38894b52df0e2e5917ba22ec2cc9, disabling compactions & flushes 2024-12-13T07:33:36,691 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 2024-12-13T07:33:36,692 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 2024-12-13T07:33:36,692 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. after waiting 0 ms 2024-12-13T07:33:36,692 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 2024-12-13T07:33:36,692 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 
2024-12-13T07:33:36,692 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6e0e38894b52df0e2e5917ba22ec2cc9: 2024-12-13T07:33:36,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741913_1089 (size=58) 2024-12-13T07:33:36,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741913_1089 (size=58) 2024-12-13T07:33:36,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741913_1089 (size=58) 2024-12-13T07:33:36,702 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:36,702 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 3ff931d0ce225fde6a6203e81b6f91e7, disabling compactions & flushes 2024-12-13T07:33:36,702 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:36,702 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:36,702 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. after waiting 0 ms 2024-12-13T07:33:36,702 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:36,702 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:36,702 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 3ff931d0ce225fde6a6203e81b6f91e7: 2024-12-13T07:33:36,703 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:33:36,704 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734075216703"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075216703"}]},"ts":"1734075216703"} 2024-12-13T07:33:36,704 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734075216703"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075216703"}]},"ts":"1734075216703"} 2024-12-13T07:33:36,706 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
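
[Editor's note] The create request above carries the full table descriptor ('testExportWithResetTtl', a single 'cf' family, VERSIONS => '1', BLOCKSIZE => 65536) and produces two regions split at row key '1'. A rough client-side equivalent using the descriptor builders is sketched below; only the attributes visible in the log are set, everything else is left at its default, and the Admin instance is assumed to exist.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      static void createTable(Admin admin) throws java.io.IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportWithResetTtl"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)     // VERSIONS => '1'
                .setBlocksize(65536)   // BLOCKSIZE => '65536'
                .build())
            .build();
        // One split key ("1") yields the two regions seen in the log:
        // [ '', '1' ) and [ '1', '' ).
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      }
    }
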
2024-12-13T07:33:36,707 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:33:36,707 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075216707"}]},"ts":"1734075216707"} 2024-12-13T07:33:36,709 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-13T07:33:36,723 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:33:36,724 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:33:36,724 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:33:36,724 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:33:36,724 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:33:36,724 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:33:36,724 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:33:36,724 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:33:36,725 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=6e0e38894b52df0e2e5917ba22ec2cc9, ASSIGN}, {pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=3ff931d0ce225fde6a6203e81b6f91e7, ASSIGN}] 2024-12-13T07:33:36,726 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=3ff931d0ce225fde6a6203e81b6f91e7, ASSIGN 2024-12-13T07:33:36,726 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=6e0e38894b52df0e2e5917ba22ec2cc9, ASSIGN 2024-12-13T07:33:36,727 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=3ff931d0ce225fde6a6203e81b6f91e7, ASSIGN; state=OFFLINE, location=9eb75de94ef3,42189,1734075182526; forceNewPlan=false, retain=false 2024-12-13T07:33:36,727 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=6e0e38894b52df0e2e5917ba22ec2cc9, ASSIGN; state=OFFLINE, location=9eb75de94ef3,36021,1734075182342; forceNewPlan=false, retain=false 2024-12-13T07:33:36,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-13T07:33:36,878 INFO 
[9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:33:36,878 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=3ff931d0ce225fde6a6203e81b6f91e7, regionState=OPENING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:36,878 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=6e0e38894b52df0e2e5917ba22ec2cc9, regionState=OPENING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:36,880 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; OpenRegionProcedure 3ff931d0ce225fde6a6203e81b6f91e7, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:33:36,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=42, state=RUNNABLE; OpenRegionProcedure 6e0e38894b52df0e2e5917ba22ec2cc9, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:33:36,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-13T07:33:37,032 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:37,033 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:37,036 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 2024-12-13T07:33:37,036 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:37,037 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 3ff931d0ce225fde6a6203e81b6f91e7, NAME => 'testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T07:33:37,037 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => 6e0e38894b52df0e2e5917ba22ec2cc9, NAME => 'testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T07:33:37,037 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. service=AccessControlService 2024-12-13T07:33:37,037 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. service=AccessControlService 2024-12-13T07:33:37,037 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T07:33:37,037 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:33:37,037 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:37,037 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:37,037 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:37,037 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:37,037 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:37,038 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:37,038 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for 6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:37,038 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for 6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:37,039 INFO [StoreOpener-3ff931d0ce225fde6a6203e81b6f91e7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:37,039 INFO [StoreOpener-6e0e38894b52df0e2e5917ba22ec2cc9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:37,041 INFO [StoreOpener-3ff931d0ce225fde6a6203e81b6f91e7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3ff931d0ce225fde6a6203e81b6f91e7 columnFamilyName cf 2024-12-13T07:33:37,041 INFO [StoreOpener-6e0e38894b52df0e2e5917ba22ec2cc9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e0e38894b52df0e2e5917ba22ec2cc9 columnFamilyName cf 2024-12-13T07:33:37,041 DEBUG [StoreOpener-3ff931d0ce225fde6a6203e81b6f91e7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:37,041 DEBUG [StoreOpener-6e0e38894b52df0e2e5917ba22ec2cc9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:37,041 INFO [StoreOpener-6e0e38894b52df0e2e5917ba22ec2cc9-1 {}] regionserver.HStore(327): Store=6e0e38894b52df0e2e5917ba22ec2cc9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:37,041 INFO [StoreOpener-3ff931d0ce225fde6a6203e81b6f91e7-1 {}] regionserver.HStore(327): Store=3ff931d0ce225fde6a6203e81b6f91e7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:37,042 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:37,043 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:37,043 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:37,043 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:37,045 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 
{event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for 6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:37,045 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:37,047 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:37,047 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:37,048 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened 6e0e38894b52df0e2e5917ba22ec2cc9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61433653, jitterRate=-0.08456723392009735}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:33:37,049 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for 6e0e38894b52df0e2e5917ba22ec2cc9: 2024-12-13T07:33:37,049 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 3ff931d0ce225fde6a6203e81b6f91e7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68801270, jitterRate=0.025218814611434937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:33:37,049 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 3ff931d0ce225fde6a6203e81b6f91e7: 2024-12-13T07:33:37,050 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9., pid=45, masterSystemTime=1734075217033 2024-12-13T07:33:37,050 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7., pid=44, masterSystemTime=1734075217032 2024-12-13T07:33:37,051 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 2024-12-13T07:33:37,051 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 
2024-12-13T07:33:37,052 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=6e0e38894b52df0e2e5917ba22ec2cc9, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:37,052 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:37,052 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:37,052 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=3ff931d0ce225fde6a6203e81b6f91e7, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:37,056 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=42 2024-12-13T07:33:37,056 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-13T07:33:37,056 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=42, state=SUCCESS; OpenRegionProcedure 6e0e38894b52df0e2e5917ba22ec2cc9, server=9eb75de94ef3,36021,1734075182342 in 172 msec 2024-12-13T07:33:37,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=6e0e38894b52df0e2e5917ba22ec2cc9, ASSIGN in 331 msec 2024-12-13T07:33:37,057 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; OpenRegionProcedure 3ff931d0ce225fde6a6203e81b6f91e7, server=9eb75de94ef3,42189,1734075182526 in 174 msec 2024-12-13T07:33:37,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=41 2024-12-13T07:33:37,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=3ff931d0ce225fde6a6203e81b6f91e7, ASSIGN in 331 msec 2024-12-13T07:33:37,059 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:33:37,059 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075217059"}]},"ts":"1734075217059"} 2024-12-13T07:33:37,061 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-13T07:33:37,107 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:33:37,107 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-13T07:33:37,109 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-13T07:33:37,114 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:37,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:37,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:37,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:37,123 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:37,123 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:37,124 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:37,124 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:37,124 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:37,124 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:37,125 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:37,125 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:37,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=testExportWithResetTtl in 472 msec 2024-12-13T07:33:37,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=41 2024-12-13T07:33:37,259 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportWithResetTtl, procId: 41 completed 2024-12-13T07:33:37,259 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-13T07:33:37,259 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:37,262 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-13T07:33:37,263 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:37,263 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportWithResetTtl assigned. 2024-12-13T07:33:37,275 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:33:37,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42189 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:33:37,281 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportWithResetTtl 2024-12-13T07:33:37,281 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 2024-12-13T07:33:37,282 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:37,300 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-13T07:33:37,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075217300 (current time:1734075217300). 
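The entries above cover the CreateTableProcedure finishing (pid=41), the test writing rows with the WAL disabled, and the master receiving a snapshot request of type=FLUSH with ttl=100000 for testExportWithResetTtl. As a minimal sketch of the client side of such a request, assuming the standard HBase 2.x Admin API (class and method names below come from that public API; only the table and snapshot names are taken from this log, and the TTL seen in the log is omitted because it is passed through snapshot properties in the test), the call looks roughly like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Request a FLUSH-type snapshot; the master runs it as a SnapshotProcedure
      // (pid=46 in the log below) with one SnapshotRegionProcedure per region.
      admin.snapshot("snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"),
          SnapshotType.FLUSH);
    }
  }
}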
2024-12-13T07:33:37,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-13T07:33:37,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:33:37,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x00ec0296 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f5085ef 2024-12-13T07:33:37,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@202005e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:37,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:37,318 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44400, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:37,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x00ec0296 to 127.0.0.1:59810 2024-12-13T07:33:37,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:37,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b36052b to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10c2945f 2024-12-13T07:33:37,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30edac94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:37,334 DEBUG [hconnection-0x39c997f3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:37,336 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44406, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:37,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:37,339 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:37,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b36052b to 127.0.0.1:59810 2024-12-13T07:33:37,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:37,341 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-13T07:33:37,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:33:37,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-13T07:33:37,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-13T07:33:37,344 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:33:37,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-13T07:33:37,345 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:33:37,348 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:33:37,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741914_1090 (size=143) 2024-12-13T07:33:37,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741914_1090 (size=143) 2024-12-13T07:33:37,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741914_1090 (size=143) 2024-12-13T07:33:37,360 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:33:37,361 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 6e0e38894b52df0e2e5917ba22ec2cc9}, {pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 3ff931d0ce225fde6a6203e81b6f91e7}] 2024-12-13T07:33:37,362 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:37,362 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:37,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-13T07:33:37,513 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:37,513 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:37,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=47 2024-12-13T07:33:37,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42189 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=48 2024-12-13T07:33:37,514 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 2024-12-13T07:33:37,514 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2837): Flushing 6e0e38894b52df0e2e5917ba22ec2cc9 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-13T07:33:37,514 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 
2024-12-13T07:33:37,515 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 3ff931d0ce225fde6a6203e81b6f91e7 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-13T07:33:37,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/.tmp/cf/dc3991217cb347a681ba3b094363dc6f is 71, key is 1d089a5345e8ea4c6560fe20dfa06183/cf:q/1734075217276/Put/seqid=0 2024-12-13T07:33:37,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/.tmp/cf/db89186da7684188849e9c5be53b7a47 is 71, key is 0d6d8ac433af82caea175cffdea40689/cf:q/1734075217275/Put/seqid=0 2024-12-13T07:33:37,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741915_1091 (size=8256) 2024-12-13T07:33:37,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741915_1091 (size=8256) 2024-12-13T07:33:37,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741915_1091 (size=8256) 2024-12-13T07:33:37,544 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/.tmp/cf/dc3991217cb347a681ba3b094363dc6f 2024-12-13T07:33:37,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741916_1092 (size=5356) 2024-12-13T07:33:37,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741916_1092 (size=5356) 2024-12-13T07:33:37,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741916_1092 (size=5356) 2024-12-13T07:33:37,557 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/.tmp/cf/db89186da7684188849e9c5be53b7a47 2024-12-13T07:33:37,567 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/.tmp/cf/db89186da7684188849e9c5be53b7a47 as 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/cf/db89186da7684188849e9c5be53b7a47 2024-12-13T07:33:37,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/.tmp/cf/dc3991217cb347a681ba3b094363dc6f as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/cf/dc3991217cb347a681ba3b094363dc6f 2024-12-13T07:33:37,575 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/cf/db89186da7684188849e9c5be53b7a47, entries=4, sequenceid=5, filesize=5.2 K 2024-12-13T07:33:37,577 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 6e0e38894b52df0e2e5917ba22ec2cc9 in 63ms, sequenceid=5, compaction requested=false 2024-12-13T07:33:37,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-13T07:33:37,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2538): Flush status journal for 6e0e38894b52df0e2e5917ba22ec2cc9: 2024-12-13T07:33:37,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. for snaptb-testExportWithResetTtl completed. 2024-12-13T07:33:37,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-13T07:33:37,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:37,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/cf/db89186da7684188849e9c5be53b7a47] hfiles 2024-12-13T07:33:37,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/cf/db89186da7684188849e9c5be53b7a47 for snapshot=snaptb-testExportWithResetTtl 2024-12-13T07:33:37,596 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/cf/dc3991217cb347a681ba3b094363dc6f, entries=46, sequenceid=5, filesize=8.1 K 2024-12-13T07:33:37,598 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 3ff931d0ce225fde6a6203e81b6f91e7 in 82ms, sequenceid=5, compaction requested=false 2024-12-13T07:33:37,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 3ff931d0ce225fde6a6203e81b6f91e7: 2024-12-13T07:33:37,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. for snaptb-testExportWithResetTtl completed. 2024-12-13T07:33:37,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-13T07:33:37,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:37,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/cf/dc3991217cb347a681ba3b094363dc6f] hfiles 2024-12-13T07:33:37,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/cf/dc3991217cb347a681ba3b094363dc6f for snapshot=snaptb-testExportWithResetTtl 2024-12-13T07:33:37,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741917_1093 (size=100) 2024-12-13T07:33:37,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741917_1093 (size=100) 2024-12-13T07:33:37,620 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 2024-12-13T07:33:37,620 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=47 2024-12-13T07:33:37,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741917_1093 (size=100) 2024-12-13T07:33:37,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=47 2024-12-13T07:33:37,621 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:37,621 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:37,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; SnapshotRegionProcedure 6e0e38894b52df0e2e5917ba22ec2cc9 in 262 msec 2024-12-13T07:33:37,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-13T07:33:37,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741918_1094 (size=100) 2024-12-13T07:33:37,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741918_1094 (size=100) 2024-12-13T07:33:37,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741918_1094 (size=100) 2024-12-13T07:33:37,660 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:37,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-13T07:33:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-13T07:33:37,661 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:37,661 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:37,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-12-13T07:33:37,664 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:33:37,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; SnapshotRegionProcedure 3ff931d0ce225fde6a6203e81b6f91e7 in 301 msec 2024-12-13T07:33:37,665 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:33:37,665 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:33:37,665 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-13T07:33:37,666 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-13T07:33:37,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741919_1095 (size=600) 2024-12-13T07:33:37,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741919_1095 (size=600) 2024-12-13T07:33:37,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741919_1095 (size=600) 2024-12-13T07:33:37,690 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ 
ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:33:37,699 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:33:37,699 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-13T07:33:37,701 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:33:37,701 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-13T07:33:37,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 359 msec 2024-12-13T07:33:37,803 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0001_000001 (auth:SIMPLE) from 127.0.0.1:34574 2024-12-13T07:33:37,810 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0001/container_1734075189398_0001_01_000001/launch_container.sh] 2024-12-13T07:33:37,810 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0001/container_1734075189398_0001_01_000001/container_tokens] 2024-12-13T07:33:37,810 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0001/container_1734075189398_0001_01_000001/sysfs] 2024-12-13T07:33:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=46 2024-12-13T07:33:37,949 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl, procId: 46 completed 2024-12-13T07:33:37,960 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075217960 2024-12-13T07:33:37,960 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41477, tgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075217960, rawTgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075217960, srcFsUri=hdfs://localhost:41477, srcDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:37,989 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41477, inputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:37,989 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075217960, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075217960/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-13T07:33:37,991 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
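At this point the snapshot procedure has completed (pid=46 SUCCESS) and ExportSnapshot is being configured with source and target directories on hdfs://localhost:41477. As a hedged sketch of how such an export is normally driven, assuming ExportSnapshot's Tool interface and the option names shown in the HBase reference guide (the snapshot name and destination URI are copied from the log; the mapper count is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ExportSnapshot copies the snapshot manifest first, then launches a
    // MapReduce job that copies the referenced hfiles to the target file system.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to", "hdfs://localhost:41477/user/jenkins/test-data/"
            + "e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075217960",
        "-mappers", "1"   // illustrative value, not taken from the log
    });
    System.exit(rc);
  }
}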
2024-12-13T07:33:37,997 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075217960/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-13T07:33:38,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741920_1096 (size=143) 2024-12-13T07:33:38,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741920_1096 (size=143) 2024-12-13T07:33:38,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741920_1096 (size=143) 2024-12-13T07:33:38,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741921_1097 (size=600) 2024-12-13T07:33:38,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741921_1097 (size=600) 2024-12-13T07:33:38,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741921_1097 (size=600) 2024-12-13T07:33:38,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741922_1098 (size=141) 2024-12-13T07:33:38,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741922_1098 (size=141) 2024-12-13T07:33:38,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741922_1098 (size=141) 2024-12-13T07:33:38,060 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:38,060 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:38,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:38,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:38,911 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:33:39,094 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-16007339978441249588.jar 2024-12-13T07:33:39,095 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:39,095 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:39,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-16524727097588005857.jar 2024-12-13T07:33:39,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:39,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:39,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:39,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:39,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:39,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:39,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T07:33:39,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T07:33:39,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T07:33:39,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T07:33:39,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T07:33:39,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T07:33:39,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T07:33:39,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T07:33:39,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T07:33:39,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T07:33:39,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T07:33:39,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T07:33:39,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:33:39,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:33:39,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:33:39,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:33:39,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:33:39,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:33:39,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:33:39,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741923_1099 (size=127628) 2024-12-13T07:33:39,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741923_1099 (size=127628) 2024-12-13T07:33:39,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741923_1099 (size=127628) 2024-12-13T07:33:39,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741924_1100 (size=2172137) 2024-12-13T07:33:39,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741924_1100 (size=2172137) 2024-12-13T07:33:39,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741924_1100 (size=2172137) 2024-12-13T07:33:39,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741925_1101 (size=213228) 2024-12-13T07:33:39,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741925_1101 (size=213228) 2024-12-13T07:33:39,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741925_1101 (size=213228) 2024-12-13T07:33:39,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741926_1102 (size=1877034) 2024-12-13T07:33:39,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741926_1102 (size=1877034) 2024-12-13T07:33:39,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40823 is added to blk_1073741926_1102 (size=1877034) 2024-12-13T07:33:39,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741927_1103 (size=533455) 2024-12-13T07:33:39,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741927_1103 (size=533455) 2024-12-13T07:33:39,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741927_1103 (size=533455) 2024-12-13T07:33:39,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741928_1104 (size=7280644) 2024-12-13T07:33:39,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741928_1104 (size=7280644) 2024-12-13T07:33:39,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741928_1104 (size=7280644) 2024-12-13T07:33:39,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741929_1105 (size=4188619) 2024-12-13T07:33:39,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741929_1105 (size=4188619) 2024-12-13T07:33:39,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741929_1105 (size=4188619) 2024-12-13T07:33:39,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741930_1106 (size=20406) 2024-12-13T07:33:39,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741930_1106 (size=20406) 2024-12-13T07:33:39,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741930_1106 (size=20406) 2024-12-13T07:33:39,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741931_1107 (size=75495) 2024-12-13T07:33:39,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741931_1107 (size=75495) 2024-12-13T07:33:39,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741931_1107 (size=75495) 2024-12-13T07:33:39,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741932_1108 (size=45609) 2024-12-13T07:33:39,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741932_1108 (size=45609) 2024-12-13T07:33:39,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741932_1108 (size=45609) 2024-12-13T07:33:39,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741933_1109 (size=110084) 2024-12-13T07:33:39,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741933_1109 (size=110084) 2024-12-13T07:33:39,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741933_1109 (size=110084) 2024-12-13T07:33:39,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741934_1110 (size=1323991) 2024-12-13T07:33:39,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741934_1110 (size=1323991) 2024-12-13T07:33:39,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741934_1110 (size=1323991) 2024-12-13T07:33:39,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741935_1111 (size=23076) 2024-12-13T07:33:39,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741935_1111 (size=23076) 2024-12-13T07:33:39,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741935_1111 (size=23076) 2024-12-13T07:33:40,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741936_1112 (size=126803) 2024-12-13T07:33:40,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741936_1112 (size=126803) 2024-12-13T07:33:40,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741936_1112 (size=126803) 2024-12-13T07:33:40,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741937_1113 (size=322274) 2024-12-13T07:33:40,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741937_1113 (size=322274) 2024-12-13T07:33:40,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741937_1113 (size=322274) 2024-12-13T07:33:40,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741938_1114 (size=1832290) 2024-12-13T07:33:40,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741938_1114 (size=1832290) 2024-12-13T07:33:40,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741938_1114 (size=1832290) 2024-12-13T07:33:40,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741939_1115 (size=30081) 2024-12-13T07:33:40,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741939_1115 (size=30081) 2024-12-13T07:33:40,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741939_1115 (size=30081) 2024-12-13T07:33:40,216 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741940_1116 (size=451756) 2024-12-13T07:33:40,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741940_1116 (size=451756) 2024-12-13T07:33:40,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741940_1116 (size=451756) 2024-12-13T07:33:40,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741941_1117 (size=53616) 2024-12-13T07:33:40,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741941_1117 (size=53616) 2024-12-13T07:33:40,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741941_1117 (size=53616) 2024-12-13T07:33:40,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741942_1118 (size=29229) 2024-12-13T07:33:40,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741942_1118 (size=29229) 2024-12-13T07:33:40,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741942_1118 (size=29229) 2024-12-13T07:33:40,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741943_1119 (size=169089) 2024-12-13T07:33:40,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741943_1119 (size=169089) 2024-12-13T07:33:40,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741943_1119 (size=169089) 2024-12-13T07:33:40,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741944_1120 (size=5175431) 2024-12-13T07:33:40,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741944_1120 (size=5175431) 2024-12-13T07:33:40,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741944_1120 (size=5175431) 2024-12-13T07:33:40,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741945_1121 (size=136454) 2024-12-13T07:33:40,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741945_1121 (size=136454) 2024-12-13T07:33:40,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741945_1121 (size=136454) 2024-12-13T07:33:40,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741946_1122 (size=907854) 2024-12-13T07:33:40,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741946_1122 (size=907854) 2024-12-13T07:33:40,380 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741946_1122 (size=907854) 2024-12-13T07:33:40,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741947_1123 (size=3317408) 2024-12-13T07:33:40,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741947_1123 (size=3317408) 2024-12-13T07:33:40,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741947_1123 (size=3317408) 2024-12-13T07:33:40,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741948_1124 (size=6350899) 2024-12-13T07:33:40,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741948_1124 (size=6350899) 2024-12-13T07:33:40,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741948_1124 (size=6350899) 2024-12-13T07:33:40,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741949_1125 (size=503880) 2024-12-13T07:33:40,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741949_1125 (size=503880) 2024-12-13T07:33:40,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741949_1125 (size=503880) 2024-12-13T07:33:40,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741950_1126 (size=4695811) 2024-12-13T07:33:40,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741950_1126 (size=4695811) 2024-12-13T07:33:40,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741950_1126 (size=4695811) 2024-12-13T07:33:40,464 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-13T07:33:40,468 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-13T07:33:40,471 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T07:33:40,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741951_1127 (size=324) 2024-12-13T07:33:40,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741951_1127 (size=324) 2024-12-13T07:33:40,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741951_1127 (size=324) 2024-12-13T07:33:40,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741952_1128 (size=15) 2024-12-13T07:33:40,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741952_1128 (size=15) 2024-12-13T07:33:40,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741952_1128 (size=15) 2024-12-13T07:33:40,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741953_1129 (size=304881) 2024-12-13T07:33:40,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741953_1129 (size=304881) 2024-12-13T07:33:40,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741953_1129 (size=304881) 2024-12-13T07:33:40,540 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:33:40,541 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T07:33:40,804 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0002_000001 (auth:SIMPLE) from 127.0.0.1:52480 2024-12-13T07:33:42,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-13T07:33:42,021 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-13T07:33:42,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-13T07:33:42,022 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-13T07:33:42,023 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-13T07:33:46,508 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0002_000001 (auth:SIMPLE) from 127.0.0.1:49898 2024-12-13T07:33:46,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741954_1130 (size=350555) 2024-12-13T07:33:46,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741954_1130 (size=350555) 2024-12-13T07:33:46,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741954_1130 (size=350555) 2024-12-13T07:33:47,263 INFO [master/9eb75de94ef3:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-13T07:33:47,263 INFO [master/9eb75de94ef3:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-13T07:33:47,526 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:33:48,773 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0002_000001 (auth:SIMPLE) from 127.0.0.1:52486 2024-12-13T07:33:51,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741955_1131 (size=8256) 2024-12-13T07:33:51,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741955_1131 (size=8256) 2024-12-13T07:33:51,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741955_1131 (size=8256) 2024-12-13T07:33:51,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741956_1132 (size=5356) 2024-12-13T07:33:51,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741956_1132 (size=5356) 2024-12-13T07:33:51,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741956_1132 (size=5356) 2024-12-13T07:33:51,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741957_1133 (size=17398) 2024-12-13T07:33:51,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741957_1133 (size=17398) 2024-12-13T07:33:51,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741957_1133 (size=17398) 2024-12-13T07:33:51,952 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_2/usercache/jenkins/appcache/application_1734075189398_0002/container_1734075189398_0002_01_000002/launch_container.sh] 2024-12-13T07:33:51,952 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_2/usercache/jenkins/appcache/application_1734075189398_0002/container_1734075189398_0002_01_000002/container_tokens] 2024-12-13T07:33:51,952 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_2/usercache/jenkins/appcache/application_1734075189398_0002/container_1734075189398_0002_01_000002/sysfs] 2024-12-13T07:33:51,969 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741958_1134 (size=461) 2024-12-13T07:33:51,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741958_1134 (size=461) 2024-12-13T07:33:51,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741958_1134 (size=461) 2024-12-13T07:33:52,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741959_1135 (size=17398) 2024-12-13T07:33:52,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741959_1135 (size=17398) 2024-12-13T07:33:52,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741959_1135 (size=17398) 2024-12-13T07:33:52,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741960_1136 (size=350555) 2024-12-13T07:33:52,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741960_1136 (size=350555) 2024-12-13T07:33:52,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741960_1136 (size=350555) 2024-12-13T07:33:52,063 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0002_000001 (auth:SIMPLE) from 127.0.0.1:50750 2024-12-13T07:33:53,787 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T07:33:53,789 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-13T07:33:53,800 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb-testExportWithResetTtl 2024-12-13T07:33:53,800 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T07:33:53,801 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T07:33:53,801 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-13T07:33:53,802 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-13T07:33:53,802 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-13T07:33:53,802 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075217960/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075217960/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-13T07:33:53,802 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075217960/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-13T07:33:53,802 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075217960/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-13T07:33:53,812 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testExportWithResetTtl 2024-12-13T07:33:53,812 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-13T07:33:53,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testExportWithResetTtl 2024-12-13T07:33:53,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T07:33:53,816 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075233816"}]},"ts":"1734075233816"} 2024-12-13T07:33:53,818 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-13T07:33:53,881 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 
2024-12-13T07:33:53,883 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-13T07:33:53,885 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=6e0e38894b52df0e2e5917ba22ec2cc9, UNASSIGN}, {pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=3ff931d0ce225fde6a6203e81b6f91e7, UNASSIGN}] 2024-12-13T07:33:53,887 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=3ff931d0ce225fde6a6203e81b6f91e7, UNASSIGN 2024-12-13T07:33:53,887 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=6e0e38894b52df0e2e5917ba22ec2cc9, UNASSIGN 2024-12-13T07:33:53,889 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=3ff931d0ce225fde6a6203e81b6f91e7, regionState=CLOSING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:53,891 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=6e0e38894b52df0e2e5917ba22ec2cc9, regionState=CLOSING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:53,893 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:33:53,893 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE; CloseRegionProcedure 3ff931d0ce225fde6a6203e81b6f91e7, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:33:53,894 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:33:53,894 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=51, state=RUNNABLE; CloseRegionProcedure 6e0e38894b52df0e2e5917ba22ec2cc9, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:33:53,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T07:33:54,046 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:54,047 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:54,047 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(124): Close 3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:54,047 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:33:54,047 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1681): Closing 3ff931d0ce225fde6a6203e81b6f91e7, disabling compactions & flushes 
2024-12-13T07:33:54,047 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:54,047 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(124): Close 6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:54,047 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:54,047 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. after waiting 0 ms 2024-12-13T07:33:54,047 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:54,047 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:33:54,047 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1681): Closing 6e0e38894b52df0e2e5917ba22ec2cc9, disabling compactions & flushes 2024-12-13T07:33:54,047 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 2024-12-13T07:33:54,047 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 2024-12-13T07:33:54,048 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. after waiting 0 ms 2024-12-13T07:33:54,048 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 
2024-12-13T07:33:54,052 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T07:33:54,052 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T07:33:54,053 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:33:54,053 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:33:54,053 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7. 2024-12-13T07:33:54,053 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9. 2024-12-13T07:33:54,053 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1635): Region close journal for 6e0e38894b52df0e2e5917ba22ec2cc9: 2024-12-13T07:33:54,053 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1635): Region close journal for 3ff931d0ce225fde6a6203e81b6f91e7: 2024-12-13T07:33:54,055 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(170): Closed 3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:54,055 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=3ff931d0ce225fde6a6203e81b6f91e7, regionState=CLOSED 2024-12-13T07:33:54,055 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(170): Closed 6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:54,057 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=6e0e38894b52df0e2e5917ba22ec2cc9, regionState=CLOSED 2024-12-13T07:33:54,059 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=52 2024-12-13T07:33:54,059 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=52, state=SUCCESS; CloseRegionProcedure 3ff931d0ce225fde6a6203e81b6f91e7, server=9eb75de94ef3,42189,1734075182526 in 164 msec 2024-12-13T07:33:54,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=3ff931d0ce225fde6a6203e81b6f91e7, UNASSIGN in 174 msec 2024-12-13T07:33:54,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=51 2024-12-13T07:33:54,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): 
Finished pid=54, ppid=51, state=SUCCESS; CloseRegionProcedure 6e0e38894b52df0e2e5917ba22ec2cc9, server=9eb75de94ef3,36021,1734075182342 in 165 msec 2024-12-13T07:33:54,062 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=51, resume processing ppid=50 2024-12-13T07:33:54,063 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=6e0e38894b52df0e2e5917ba22ec2cc9, UNASSIGN in 176 msec 2024-12-13T07:33:54,065 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-13T07:33:54,065 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; CloseTableRegionsProcedure table=testExportWithResetTtl in 180 msec 2024-12-13T07:33:54,066 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075234065"}]},"ts":"1734075234065"} 2024-12-13T07:33:54,067 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-13T07:33:54,081 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-13T07:33:54,084 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; DisableTableProcedure table=testExportWithResetTtl in 270 msec 2024-12-13T07:33:54,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T07:33:54,118 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testExportWithResetTtl, procId: 49 completed 2024-12-13T07:33:54,119 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-13T07:33:54,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T07:33:54,121 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T07:33:54,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(259): Removing permissions of removed table testExportWithResetTtl 2024-12-13T07:33:54,122 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=55, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T07:33:54,124 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-13T07:33:54,126 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:54,126 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(133): ARCHIVING 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:54,127 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/recovered.edits] 2024-12-13T07:33:54,127 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/recovered.edits] 2024-12-13T07:33:54,131 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/cf/dc3991217cb347a681ba3b094363dc6f to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/cf/dc3991217cb347a681ba3b094363dc6f 2024-12-13T07:33:54,132 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/cf/db89186da7684188849e9c5be53b7a47 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/cf/db89186da7684188849e9c5be53b7a47 2024-12-13T07:33:54,135 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/recovered.edits/8.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9/recovered.edits/8.seqid 2024-12-13T07:33:54,135 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/recovered.edits/8.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7/recovered.edits/8.seqid 2024-12-13T07:33:54,136 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/6e0e38894b52df0e2e5917ba22ec2cc9 2024-12-13T07:33:54,136 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportWithResetTtl/3ff931d0ce225fde6a6203e81b6f91e7 2024-12-13T07:33:54,136 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl 
regions 2024-12-13T07:33:54,138 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=55, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T07:33:54,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T07:33:54,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T07:33:54,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T07:33:54,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T07:33:54,140 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-13T07:33:54,140 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-13T07:33:54,141 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-13T07:33:54,141 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-13T07:33:54,141 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-13T07:33:54,143 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testExportWithResetTtl' descriptor. 2024-12-13T07:33:54,144 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=55, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T07:33:54,144 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testExportWithResetTtl' from region states. 
2024-12-13T07:33:54,144 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075234144"}]},"ts":"9223372036854775807"} 2024-12-13T07:33:54,144 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075234144"}]},"ts":"9223372036854775807"} 2024-12-13T07:33:54,146 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T07:33:54,146 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6e0e38894b52df0e2e5917ba22ec2cc9, NAME => 'testExportWithResetTtl,,1734075216650.6e0e38894b52df0e2e5917ba22ec2cc9.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3ff931d0ce225fde6a6203e81b6f91e7, NAME => 'testExportWithResetTtl,1,1734075216650.3ff931d0ce225fde6a6203e81b6f91e7.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T07:33:54,146 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testExportWithResetTtl' as deleted. 2024-12-13T07:33:54,147 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734075234147"}]},"ts":"9223372036854775807"} 2024-12-13T07:33:54,149 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testExportWithResetTtl state from META 2024-12-13T07:33:54,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T07:33:54,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T07:33:54,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T07:33:54,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T07:33:54,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:54,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:54,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:54,165 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:54,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-13T07:33:54,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:54,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:54,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:54,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:54,173 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=55, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T07:33:54,174 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DeleteTableProcedure table=testExportWithResetTtl in 55 msec 2024-12-13T07:33:54,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-13T07:33:54,267 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testExportWithResetTtl, procId: 55 completed 2024-12-13T07:33:54,267 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithResetTtl 2024-12-13T07:33:54,267 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-13T07:33:54,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T07:33:54,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-13T07:33:54,270 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075234270"}]},"ts":"1734075234270"} 2024-12-13T07:33:54,272 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-13T07:33:54,281 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-13T07:33:54,282 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-13T07:33:54,284 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=42d35628cca5707cd4a5e6b8ee4a1b9e, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96c2377ea1a745dd079d620502e62feb, UNASSIGN}] 2024-12-13T07:33:54,284 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96c2377ea1a745dd079d620502e62feb, UNASSIGN 2024-12-13T07:33:54,285 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=42d35628cca5707cd4a5e6b8ee4a1b9e, UNASSIGN 2024-12-13T07:33:54,286 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=96c2377ea1a745dd079d620502e62feb, regionState=CLOSING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:54,286 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=42d35628cca5707cd4a5e6b8ee4a1b9e, regionState=CLOSING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:54,287 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:33:54,287 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; CloseRegionProcedure 96c2377ea1a745dd079d620502e62feb, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:33:54,288 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:33:54,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE; CloseRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:33:54,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-13T07:33:54,440 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:33:54,441 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(124): Close 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:54,441 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:33:54,441 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:54,442 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1681): Closing 96c2377ea1a745dd079d620502e62feb, disabling compactions & flushes 2024-12-13T07:33:54,442 
INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:54,442 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:54,442 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. after waiting 0 ms 2024-12-13T07:33:54,442 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:54,442 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(124): Close 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:54,442 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:33:54,442 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1681): Closing 42d35628cca5707cd4a5e6b8ee4a1b9e, disabling compactions & flushes 2024-12-13T07:33:54,442 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 2024-12-13T07:33:54,442 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 2024-12-13T07:33:54,442 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. after waiting 0 ms 2024-12-13T07:33:54,442 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 
2024-12-13T07:33:54,446 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:33:54,446 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:33:54,447 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:33:54,447 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:33:54,447 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e. 2024-12-13T07:33:54,447 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb. 2024-12-13T07:33:54,447 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1635): Region close journal for 42d35628cca5707cd4a5e6b8ee4a1b9e: 2024-12-13T07:33:54,447 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1635): Region close journal for 96c2377ea1a745dd079d620502e62feb: 2024-12-13T07:33:54,449 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(170): Closed 96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:54,450 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=96c2377ea1a745dd079d620502e62feb, regionState=CLOSED 2024-12-13T07:33:54,450 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(170): Closed 42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:54,450 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=42d35628cca5707cd4a5e6b8ee4a1b9e, regionState=CLOSED 2024-12-13T07:33:54,453 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-13T07:33:54,453 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseRegionProcedure 96c2377ea1a745dd079d620502e62feb, server=9eb75de94ef3,41895,1734075182430 in 164 msec 2024-12-13T07:33:54,453 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=58 2024-12-13T07:33:54,453 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=58, state=SUCCESS; CloseRegionProcedure 42d35628cca5707cd4a5e6b8ee4a1b9e, server=9eb75de94ef3,36021,1734075182342 in 164 msec 2024-12-13T07:33:54,454 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96c2377ea1a745dd079d620502e62feb, UNASSIGN in 169 msec 2024-12-13T07:33:54,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-13T07:33:54,455 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=42d35628cca5707cd4a5e6b8ee4a1b9e, UNASSIGN in 170 msec 2024-12-13T07:33:54,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-13T07:33:54,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 173 msec 2024-12-13T07:33:54,457 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075234457"}]},"ts":"1734075234457"} 2024-12-13T07:33:54,459 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-13T07:33:54,464 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-13T07:33:54,466 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithResetTtl in 197 msec 2024-12-13T07:33:54,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-13T07:33:54,572 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl, procId: 56 completed 2024-12-13T07:33:54,573 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-13T07:33:54,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T07:33:54,575 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T07:33:54,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-13T07:33:54,575 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T07:33:54,576 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-13T07:33:54,579 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:54,579 
DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:54,581 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/recovered.edits] 2024-12-13T07:33:54,581 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/recovered.edits] 2024-12-13T07:33:54,585 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/cf/8ed2ab2721c7472c963eca6177b2b002 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/cf/8ed2ab2721c7472c963eca6177b2b002 2024-12-13T07:33:54,585 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/cf/a20984d8785847ef968bd7b51be1dcd6 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/cf/a20984d8785847ef968bd7b51be1dcd6 2024-12-13T07:33:54,588 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e/recovered.edits/9.seqid 2024-12-13T07:33:54,588 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb/recovered.edits/9.seqid 2024-12-13T07:33:54,589 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/42d35628cca5707cd4a5e6b8ee4a1b9e 2024-12-13T07:33:54,589 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithResetTtl/96c2377ea1a745dd079d620502e62feb 2024-12-13T07:33:54,589 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-13T07:33:54,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T07:33:54,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T07:33:54,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T07:33:54,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T07:33:54,590 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-13T07:33:54,590 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-13T07:33:54,590 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-13T07:33:54,590 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-13T07:33:54,592 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T07:33:54,595 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-13T07:33:54,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T07:33:54,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T07:33:54,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T07:33:54,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T07:33:54,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:54,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:54,598 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-13T07:33:54,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:54,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:54,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-13T07:33:54,599 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T07:33:54,599 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithResetTtl' from region states. 2024-12-13T07:33:54,599 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075234599"}]},"ts":"9223372036854775807"} 2024-12-13T07:33:54,600 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075234599"}]},"ts":"9223372036854775807"} 2024-12-13T07:33:54,602 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T07:33:54,602 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 42d35628cca5707cd4a5e6b8ee4a1b9e, NAME => 'testtb-testExportWithResetTtl,,1734075213690.42d35628cca5707cd4a5e6b8ee4a1b9e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 96c2377ea1a745dd079d620502e62feb, NAME => 'testtb-testExportWithResetTtl,1,1734075213690.96c2377ea1a745dd079d620502e62feb.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T07:33:54,602 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithResetTtl' as deleted. 
2024-12-13T07:33:54,602 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734075234602"}]},"ts":"9223372036854775807"} 2024-12-13T07:33:54,604 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithResetTtl state from META 2024-12-13T07:33:54,614 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T07:33:54,615 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithResetTtl in 41 msec 2024-12-13T07:33:54,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-13T07:33:54,700 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl, procId: 62 completed 2024-12-13T07:33:54,711 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" 2024-12-13T07:33:54,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-13T07:33:54,715 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" 2024-12-13T07:33:54,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-13T07:33:54,719 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" 2024-12-13T07:33:54,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-13T07:33:54,743 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=797 (was 777) Potentially hanging thread: IPC Client (1658146201) connection to localhost/127.0.0.1:41271 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x6218feab-shared-pool-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver 
for client DFSClient_NONMAPREDUCE_824479254_1 at /127.0.0.1:59916 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2016 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) 
java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:59936 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:42596 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-12 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41271 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_824479254_1 at /127.0.0.1:45718 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:45732 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 56702) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=811 (was 803) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=612 (was 594) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 17), AvailableMemoryMB=7285 (was 7695) 2024-12-13T07:33:54,744 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-13T07:33:54,763 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=797, OpenFileDescriptor=811, MaxFileDescriptor=1048576, SystemLoadAverage=612, ProcessCount=17, AvailableMemoryMB=7282 2024-12-13T07:33:54,763 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-13T07:33:54,765 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:33:54,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-13T07:33:54,767 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:33:54,767 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:54,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 63 2024-12-13T07:33:54,768 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:33:54,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T07:33:54,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741961_1137 (size=407) 2024-12-13T07:33:54,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741961_1137 (size=407) 2024-12-13T07:33:54,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741961_1137 (size=407) 2024-12-13T07:33:54,777 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c211b11636a7ab2928dd936a1bcf7e13, NAME => 'testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:54,778 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1798734e2793477685c52953afdcb1c8, NAME => 'testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:54,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741962_1138 (size=68) 2024-12-13T07:33:54,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741962_1138 (size=68) 2024-12-13T07:33:54,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741962_1138 (size=68) 2024-12-13T07:33:54,793 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:54,794 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing 1798734e2793477685c52953afdcb1c8, disabling compactions & flushes 2024-12-13T07:33:54,794 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:33:54,794 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:33:54,794 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. after waiting 0 ms 2024-12-13T07:33:54,794 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:33:54,794 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 
2024-12-13T07:33:54,794 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for 1798734e2793477685c52953afdcb1c8: 2024-12-13T07:33:54,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741963_1139 (size=68) 2024-12-13T07:33:54,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741963_1139 (size=68) 2024-12-13T07:33:54,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741963_1139 (size=68) 2024-12-13T07:33:54,801 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:54,801 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing c211b11636a7ab2928dd936a1bcf7e13, disabling compactions & flushes 2024-12-13T07:33:54,801 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 2024-12-13T07:33:54,801 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 2024-12-13T07:33:54,801 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. after waiting 0 ms 2024-12-13T07:33:54,801 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 2024-12-13T07:33:54,801 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 
2024-12-13T07:33:54,801 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for c211b11636a7ab2928dd936a1bcf7e13: 2024-12-13T07:33:54,802 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:33:54,803 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734075234803"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075234803"}]},"ts":"1734075234803"} 2024-12-13T07:33:54,803 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734075234803"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075234803"}]},"ts":"1734075234803"} 2024-12-13T07:33:54,805 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T07:33:54,806 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:33:54,806 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075234806"}]},"ts":"1734075234806"} 2024-12-13T07:33:54,808 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-13T07:33:54,823 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:33:54,825 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:33:54,825 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:33:54,825 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:33:54,825 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:33:54,825 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:33:54,825 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:33:54,825 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:33:54,825 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c211b11636a7ab2928dd936a1bcf7e13, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1798734e2793477685c52953afdcb1c8, ASSIGN}] 2024-12-13T07:33:54,827 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportFileSystemState, region=1798734e2793477685c52953afdcb1c8, ASSIGN 2024-12-13T07:33:54,827 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c211b11636a7ab2928dd936a1bcf7e13, ASSIGN 2024-12-13T07:33:54,827 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c211b11636a7ab2928dd936a1bcf7e13, ASSIGN; state=OFFLINE, location=9eb75de94ef3,42189,1734075182526; forceNewPlan=false, retain=false 2024-12-13T07:33:54,827 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1798734e2793477685c52953afdcb1c8, ASSIGN; state=OFFLINE, location=9eb75de94ef3,36021,1734075182342; forceNewPlan=false, retain=false 2024-12-13T07:33:54,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T07:33:54,978 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:33:54,978 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=1798734e2793477685c52953afdcb1c8, regionState=OPENING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:54,978 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=c211b11636a7ab2928dd936a1bcf7e13, regionState=OPENING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:54,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure 1798734e2793477685c52953afdcb1c8, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:33:54,981 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE; OpenRegionProcedure c211b11636a7ab2928dd936a1bcf7e13, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:33:55,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T07:33:55,132 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:55,132 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:55,135 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:33:55,135 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 
2024-12-13T07:33:55,135 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 1798734e2793477685c52953afdcb1c8, NAME => 'testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T07:33:55,135 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7285): Opening region: {ENCODED => c211b11636a7ab2928dd936a1bcf7e13, NAME => 'testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T07:33:55,136 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. service=AccessControlService 2024-12-13T07:33:55,136 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. service=AccessControlService 2024-12-13T07:33:55,136 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:33:55,136 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:33:55,136 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:55,136 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:55,136 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:55,136 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:33:55,136 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7327): checking encryption for c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:55,136 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:55,136 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] 
regionserver.HRegion(7330): checking classloading for 1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:55,136 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7330): checking classloading for c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:55,138 INFO [StoreOpener-1798734e2793477685c52953afdcb1c8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:55,139 INFO [StoreOpener-c211b11636a7ab2928dd936a1bcf7e13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:55,140 INFO [StoreOpener-1798734e2793477685c52953afdcb1c8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1798734e2793477685c52953afdcb1c8 columnFamilyName cf 2024-12-13T07:33:55,140 DEBUG [StoreOpener-1798734e2793477685c52953afdcb1c8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:55,140 INFO [StoreOpener-1798734e2793477685c52953afdcb1c8-1 {}] regionserver.HStore(327): Store=1798734e2793477685c52953afdcb1c8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:55,141 INFO [StoreOpener-c211b11636a7ab2928dd936a1bcf7e13-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c211b11636a7ab2928dd936a1bcf7e13 columnFamilyName cf 2024-12-13T07:33:55,141 DEBUG [StoreOpener-c211b11636a7ab2928dd936a1bcf7e13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:33:55,141 INFO [StoreOpener-c211b11636a7ab2928dd936a1bcf7e13-1 {}] regionserver.HStore(327): Store=c211b11636a7ab2928dd936a1bcf7e13/cf, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:33:55,141 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:55,142 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:55,142 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:55,143 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:55,145 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:55,145 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1085): writing seq id for c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:55,147 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:55,147 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:33:55,147 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1102): Opened c211b11636a7ab2928dd936a1bcf7e13; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69740026, jitterRate=0.03920736908912659}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:33:55,148 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 1798734e2793477685c52953afdcb1c8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69436295, jitterRate=0.034681424498558044}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:33:55,148 DEBUG 
[RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 1798734e2793477685c52953afdcb1c8: 2024-12-13T07:33:55,148 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1001): Region open journal for c211b11636a7ab2928dd936a1bcf7e13: 2024-12-13T07:33:55,149 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8., pid=66, masterSystemTime=1734075235132 2024-12-13T07:33:55,149 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13., pid=67, masterSystemTime=1734075235132 2024-12-13T07:33:55,151 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:33:55,151 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:33:55,151 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=1798734e2793477685c52953afdcb1c8, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:55,152 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 2024-12-13T07:33:55,152 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 
2024-12-13T07:33:55,152 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=c211b11636a7ab2928dd936a1bcf7e13, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:55,155 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-13T07:33:55,155 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure 1798734e2793477685c52953afdcb1c8, server=9eb75de94ef3,36021,1734075182342 in 173 msec 2024-12-13T07:33:55,157 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1798734e2793477685c52953afdcb1c8, ASSIGN in 330 msec 2024-12-13T07:33:55,158 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=64 2024-12-13T07:33:55,158 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=64, state=SUCCESS; OpenRegionProcedure c211b11636a7ab2928dd936a1bcf7e13, server=9eb75de94ef3,42189,1734075182526 in 175 msec 2024-12-13T07:33:55,160 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-13T07:33:55,160 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c211b11636a7ab2928dd936a1bcf7e13, ASSIGN in 333 msec 2024-12-13T07:33:55,167 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:33:55,167 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075235167"}]},"ts":"1734075235167"} 2024-12-13T07:33:55,170 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-13T07:33:55,215 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:33:55,216 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-13T07:33:55,218 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-13T07:33:55,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:55,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:55,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:55,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:33:55,231 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:55,231 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:55,231 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:55,231 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T07:33:55,233 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemState in 466 msec 2024-12-13T07:33:55,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T07:33:55,372 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState, procId: 63 completed 2024-12-13T07:33:55,372 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-13T07:33:55,372 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:55,376 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-12-13T07:33:55,376 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:55,376 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemState assigned. 2024-12-13T07:33:55,380 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T07:33:55,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075235380 (current time:1734075235380). 
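The /hbase/acl watcher refreshes above propagate the RWXCA entry the master wrote for the table owner once CreateTableProcedure finished. Roughly the same cache update would result from an explicit client-side grant; the sketch below uses the public AccessControlClient API under that assumption (the class name and standalone setup are illustrative, and in the test the permission is written by the AccessController coprocessor rather than by a client call):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Grant R/W/X/C/A on the test table; the entry lands in hbase:acl and
      // each server's ZKPermissionWatcher refreshes its cache, as logged above.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemState"),
          "jenkins",     // user seen in the log entries
          null, null,    // all families / all qualifiers
          Permission.Action.READ, Permission.Action.WRITE,
          Permission.Action.EXEC, Permission.Action.CREATE,
          Permission.Action.ADMIN);
    }
  }
}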
2024-12-13T07:33:55,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:33:55,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-13T07:33:55,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:33:55,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10e73ddd to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40af4834 2024-12-13T07:33:55,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c1e6a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:55,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:55,393 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60478, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:55,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10e73ddd to 127.0.0.1:59810 2024-12-13T07:33:55,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:55,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60f3eb7b to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@464e7b37 2024-12-13T07:33:55,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f3379b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:55,417 DEBUG [hconnection-0x5cb0b82a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:55,418 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:55,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:55,421 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42924, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:55,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x60f3eb7b to 127.0.0.1:59810 2024-12-13T07:33:55,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:55,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-13T07:33:55,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:33:55,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T07:33:55,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-13T07:33:55,425 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:33:55,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T07:33:55,426 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:33:55,428 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:33:55,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741964_1140 (size=170) 2024-12-13T07:33:55,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741964_1140 (size=170) 2024-12-13T07:33:55,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741964_1140 (size=170) 2024-12-13T07:33:55,439 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:33:55,440 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure c211b11636a7ab2928dd936a1bcf7e13}, {pid=70, ppid=68, state=RUNNABLE; 
SnapshotRegionProcedure 1798734e2793477685c52953afdcb1c8}] 2024-12-13T07:33:55,441 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:55,441 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:55,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T07:33:55,592 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:55,592 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:55,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-13T07:33:55,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42189 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-13T07:33:55,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:33:55,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 2024-12-13T07:33:55,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 1798734e2793477685c52953afdcb1c8: 2024-12-13T07:33:55,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2538): Flush status journal for c211b11636a7ab2928dd936a1bcf7e13: 2024-12-13T07:33:55,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. for emptySnaptb0-testExportFileSystemState completed. 2024-12-13T07:33:55,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. for emptySnaptb0-testExportFileSystemState completed. 2024-12-13T07:33:55,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-13T07:33:55,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-13T07:33:55,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:55,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:55,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:33:55,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:33:55,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741965_1141 (size=71) 2024-12-13T07:33:55,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741965_1141 (size=71) 2024-12-13T07:33:55,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741965_1141 (size=71) 2024-12-13T07:33:55,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 2024-12-13T07:33:55,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-13T07:33:55,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=69 2024-12-13T07:33:55,611 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:55,611 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:55,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741966_1142 (size=71) 2024-12-13T07:33:55,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741966_1142 (size=71) 2024-12-13T07:33:55,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741966_1142 (size=71) 2024-12-13T07:33:55,615 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 
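Everything from the Stored pid=68 entry down to the empty hfile reference lists above is driven by a single snapshot request. A minimal client-side sketch of that request, assuming default configuration on the classpath (class name and setup are illustrative; the snapshot and table names are the ones in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot; since nothing has been written yet, each region
      // manifest records zero hfile references, matching the "[] hfiles" lines.
      admin.snapshot("emptySnaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}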
2024-12-13T07:33:55,615 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-13T07:33:55,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-13T07:33:55,616 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:55,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; SnapshotRegionProcedure c211b11636a7ab2928dd936a1bcf7e13 in 174 msec 2024-12-13T07:33:55,616 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:55,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=68 2024-12-13T07:33:55,618 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:33:55,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=68, state=SUCCESS; SnapshotRegionProcedure 1798734e2793477685c52953afdcb1c8 in 177 msec 2024-12-13T07:33:55,619 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:33:55,620 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:33:55,620 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-13T07:33:55,621 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-13T07:33:55,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741967_1143 (size=552) 2024-12-13T07:33:55,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741967_1143 (size=552) 2024-12-13T07:33:55,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741967_1143 (size=552) 2024-12-13T07:33:55,641 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, 
snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:33:55,647 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:33:55,648 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-13T07:33:55,649 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:33:55,649 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-13T07:33:55,651 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 226 msec 2024-12-13T07:33:55,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T07:33:55,729 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 68 completed 2024-12-13T07:33:55,738 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42189 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:33:55,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:33:55,744 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemState 2024-12-13T07:33:55,745 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 
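The two "WAL disabled" warnings above correspond to test writes issued with durability turned off. A small sketch of a put that would trigger the same warning, assuming a table with family "cf"; the column value is illustrative and the row key simply reuses the style of the keys visible later in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadRowsWithoutWal {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("04d7dc60f350959712a06383356670f11"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skipping the WAL is what produces the HRegion(8254) "Data may be lost
      // in the event of a crash" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}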
2024-12-13T07:33:55,745 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:33:55,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T07:33:55,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075235763 (current time:1734075235763). 2024-12-13T07:33:55,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:33:55,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-13T07:33:55,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:33:55,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x426f75e4 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b47f46b 2024-12-13T07:33:55,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17ddf6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:55,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:55,809 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60504, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:55,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x426f75e4 to 127.0.0.1:59810 2024-12-13T07:33:55,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:55,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05bfd31d to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@21ff8b6d 2024-12-13T07:33:55,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e96ae9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:33:55,825 DEBUG [hconnection-0x4057999d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:55,826 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60520, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:33:55,829 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42936, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:33:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05bfd31d to 127.0.0.1:59810 2024-12-13T07:33:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:33:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-13T07:33:55,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:33:55,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T07:33:55,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-13T07:33:55,833 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:33:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T07:33:55,834 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:33:55,836 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:33:55,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741968_1144 (size=165) 2024-12-13T07:33:55,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741968_1144 (size=165) 2024-12-13T07:33:55,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741968_1144 (size=165) 2024-12-13T07:33:55,845 INFO [PEWorker-2 
{}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:33:55,845 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure c211b11636a7ab2928dd936a1bcf7e13}, {pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 1798734e2793477685c52953afdcb1c8}] 2024-12-13T07:33:55,846 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:55,846 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:55,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T07:33:55,997 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:33:55,997 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:33:55,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-13T07:33:55,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42189 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-13T07:33:55,998 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:33:55,998 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 
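Because snaptb0-testExportFileSystemState is a FLUSH-type snapshot, each SnapshotRegionProcedure flushes its region's memstore before recording hfile references, which is what the "Flushing ... 1/1 column families" entries that follow show. An explicit flush through the Admin API would produce similar region-server output; a sketch under that assumption (class name illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Forces a memstore flush of every region of the table; each region
      // server then writes a new hfile under the family's directory, much like
      // the .tmp/cf files committed in the entries below.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemState"));
    }
  }
}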
2024-12-13T07:33:55,998 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2837): Flushing 1798734e2793477685c52953afdcb1c8 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-13T07:33:55,998 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing c211b11636a7ab2928dd936a1bcf7e13 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-13T07:33:56,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/.tmp/cf/5fc9bdeb3cb142eb9daa7b8071b6f5f1 is 69, key is 04d7dc60f350959712a06383356670f11/cf:q/1734075235737/Put/seqid=0 2024-12-13T07:33:56,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/.tmp/cf/58b50c1a74164c718f58f7a9439cd908 is 71, key is 14f6fe185abac5dc16ffd83833fe9e2e/cf:q/1734075235739/Put/seqid=0 2024-12-13T07:33:56,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741969_1145 (size=8462) 2024-12-13T07:33:56,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741970_1146 (size=5149) 2024-12-13T07:33:56,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741970_1146 (size=5149) 2024-12-13T07:33:56,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741970_1146 (size=5149) 2024-12-13T07:33:56,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741969_1145 (size=8462) 2024-12-13T07:33:56,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741969_1145 (size=8462) 2024-12-13T07:33:56,021 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/.tmp/cf/5fc9bdeb3cb142eb9daa7b8071b6f5f1 2024-12-13T07:33:56,021 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/.tmp/cf/58b50c1a74164c718f58f7a9439cd908 2024-12-13T07:33:56,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/.tmp/cf/5fc9bdeb3cb142eb9daa7b8071b6f5f1 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/cf/5fc9bdeb3cb142eb9daa7b8071b6f5f1 2024-12-13T07:33:56,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/.tmp/cf/58b50c1a74164c718f58f7a9439cd908 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/cf/58b50c1a74164c718f58f7a9439cd908 2024-12-13T07:33:56,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/cf/58b50c1a74164c718f58f7a9439cd908, entries=49, sequenceid=6, filesize=8.3 K 2024-12-13T07:33:56,034 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/cf/5fc9bdeb3cb142eb9daa7b8071b6f5f1, entries=1, sequenceid=6, filesize=5.0 K 2024-12-13T07:33:56,034 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3040): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 1798734e2793477685c52953afdcb1c8 in 36ms, sequenceid=6, compaction requested=false 2024-12-13T07:33:56,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-13T07:33:56,034 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for c211b11636a7ab2928dd936a1bcf7e13 in 36ms, sequenceid=6, compaction requested=false 2024-12-13T07:33:56,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-13T07:33:56,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2538): Flush status journal for 1798734e2793477685c52953afdcb1c8: 2024-12-13T07:33:56,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for c211b11636a7ab2928dd936a1bcf7e13: 2024-12-13T07:33:56,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting 
region testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. for snaptb0-testExportFileSystemState completed. 2024-12-13T07:33:56,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. for snaptb0-testExportFileSystemState completed. 2024-12-13T07:33:56,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-13T07:33:56,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-13T07:33:56,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:56,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:33:56,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/cf/5fc9bdeb3cb142eb9daa7b8071b6f5f1] hfiles 2024-12-13T07:33:56,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/cf/58b50c1a74164c718f58f7a9439cd908] hfiles 2024-12-13T07:33:56,036 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/cf/5fc9bdeb3cb142eb9daa7b8071b6f5f1 for snapshot=snaptb0-testExportFileSystemState 2024-12-13T07:33:56,036 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/cf/58b50c1a74164c718f58f7a9439cd908 for snapshot=snaptb0-testExportFileSystemState 2024-12-13T07:33:56,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741972_1148 (size=110) 2024-12-13T07:33:56,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741971_1147 (size=110) 2024-12-13T07:33:56,049 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741972_1148 (size=110) 2024-12-13T07:33:56,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741971_1147 (size=110) 2024-12-13T07:33:56,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:33:56,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741972_1148 (size=110) 2024-12-13T07:33:56,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-13T07:33:56,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 2024-12-13T07:33:56,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741971_1147 (size=110) 2024-12-13T07:33:56,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-13T07:33:56,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=73 2024-12-13T07:33:56,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-13T07:33:56,050 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:56,050 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:56,050 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:33:56,051 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 1798734e2793477685c52953afdcb1c8 2024-12-13T07:33:56,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; SnapshotRegionProcedure c211b11636a7ab2928dd936a1bcf7e13 in 206 msec 2024-12-13T07:33:56,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=73, resume processing ppid=71 2024-12-13T07:33:56,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=71, state=SUCCESS; SnapshotRegionProcedure 1798734e2793477685c52953afdcb1c8 in 206 msec 2024-12-13T07:33:56,053 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState 
table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:33:56,054 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:33:56,054 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:33:56,054 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-13T07:33:56,055 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-13T07:33:56,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741973_1149 (size=630) 2024-12-13T07:33:56,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741973_1149 (size=630) 2024-12-13T07:33:56,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741973_1149 (size=630) 2024-12-13T07:33:56,065 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:33:56,070 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:33:56,071 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-13T07:33:56,072 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:33:56,072 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-13T07:33:56,073 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, 
state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 241 msec 2024-12-13T07:33:56,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T07:33:56,136 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 71 completed 2024-12-13T07:33:56,136 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075236136 2024-12-13T07:33:56,136 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41477, tgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075236136, rawTgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075236136, srcFsUri=hdfs://localhost:41477, srcDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:56,180 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41477, inputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:33:56,180 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075236136, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075236136/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-13T07:33:56,182 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
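The TestExportSnapshot entries above set up the same copy an operator would run with the ExportSnapshot tool. A hedged sketch of an equivalent invocation via ToolRunner, assuming the snapshot already exists; the target URI is illustrative rather than the test's export-test path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent CLI form (per the HBase reference guide):
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportFileSystemState -copy-to <target-uri>
    int exit = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://localhost:41477/user/jenkins/export-target" // illustrative
    });
    System.exit(exit);
  }
}

The tool first copies the snapshot manifest into the target's .hbase-snapshot/.tmp directory, as the Copy Snapshot Manifest entry below shows, and then launches a MapReduce job to copy the referenced hfiles.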
2024-12-13T07:33:56,187 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075236136/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-13T07:33:56,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741974_1150 (size=630) 2024-12-13T07:33:56,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741974_1150 (size=630) 2024-12-13T07:33:56,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741974_1150 (size=630) 2024-12-13T07:33:56,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741975_1151 (size=165) 2024-12-13T07:33:56,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741975_1151 (size=165) 2024-12-13T07:33:56,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741975_1151 (size=165) 2024-12-13T07:33:56,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:56,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:56,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:56,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:57,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-12759072105128289110.jar 2024-12-13T07:33:57,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:57,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:57,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For 
class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-2599971330610995046.jar 2024-12-13T07:33:57,167 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:57,167 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:57,167 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:57,167 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:57,168 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:57,168 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T07:33:57,168 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T07:33:57,168 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T07:33:57,168 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T07:33:57,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T07:33:57,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 
2024-12-13T07:33:57,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T07:33:57,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T07:33:57,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T07:33:57,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T07:33:57,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T07:33:57,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T07:33:57,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T07:33:57,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:33:57,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:33:57,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:33:57,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:33:57,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-13T07:33:57,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:33:57,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:33:57,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741976_1152 (size=127628) 2024-12-13T07:33:57,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741976_1152 (size=127628) 2024-12-13T07:33:57,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741976_1152 (size=127628) 2024-12-13T07:33:57,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741977_1153 (size=2172137) 2024-12-13T07:33:57,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741977_1153 (size=2172137) 2024-12-13T07:33:57,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741977_1153 (size=2172137) 2024-12-13T07:33:57,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741978_1154 (size=213228) 2024-12-13T07:33:57,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741978_1154 (size=213228) 2024-12-13T07:33:57,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741978_1154 (size=213228) 2024-12-13T07:33:57,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741979_1155 (size=1877034) 2024-12-13T07:33:57,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741979_1155 (size=1877034) 2024-12-13T07:33:57,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741979_1155 (size=1877034) 2024-12-13T07:33:57,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741980_1156 (size=533455) 2024-12-13T07:33:57,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741980_1156 (size=533455) 2024-12-13T07:33:57,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741980_1156 (size=533455) 2024-12-13T07:33:57,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741981_1157 (size=7280644) 
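The long run of "For class …, using jar …" DEBUG lines above is TableMapReduceUtil working out, for every class the copy job needs (HBase modules, shaded third-party libraries, Hadoop, OpenTelemetry, and so on), which jar should be shipped with the job; the block reports that follow are those jars being written into HDFS. As a hedged illustration rather than the test's actual code, a caller triggers that resolution roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-copy"); // hypothetical job name
    // Resolves HBase and dependency jars and attaches them to the job; a call like
    // this is what leads to the "For class ..., using jar ..." DEBUG lines seen above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}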
2024-12-13T07:33:57,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741981_1157 (size=7280644) 2024-12-13T07:33:57,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741981_1157 (size=7280644) 2024-12-13T07:33:57,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741982_1158 (size=451756) 2024-12-13T07:33:57,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741982_1158 (size=451756) 2024-12-13T07:33:57,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741982_1158 (size=451756) 2024-12-13T07:33:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741983_1159 (size=4188619) 2024-12-13T07:33:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741983_1159 (size=4188619) 2024-12-13T07:33:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741983_1159 (size=4188619) 2024-12-13T07:33:57,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741984_1160 (size=20406) 2024-12-13T07:33:57,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741984_1160 (size=20406) 2024-12-13T07:33:57,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741984_1160 (size=20406) 2024-12-13T07:33:57,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741985_1161 (size=75495) 2024-12-13T07:33:57,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741985_1161 (size=75495) 2024-12-13T07:33:57,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741985_1161 (size=75495) 2024-12-13T07:33:57,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741986_1162 (size=45609) 2024-12-13T07:33:57,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741986_1162 (size=45609) 2024-12-13T07:33:57,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741986_1162 (size=45609) 2024-12-13T07:33:57,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741987_1163 (size=110084) 2024-12-13T07:33:57,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741987_1163 (size=110084) 2024-12-13T07:33:57,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741987_1163 
(size=110084) 2024-12-13T07:33:57,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741988_1164 (size=1323991) 2024-12-13T07:33:57,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741988_1164 (size=1323991) 2024-12-13T07:33:57,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741988_1164 (size=1323991) 2024-12-13T07:33:57,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741989_1165 (size=23076) 2024-12-13T07:33:57,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741989_1165 (size=23076) 2024-12-13T07:33:57,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741989_1165 (size=23076) 2024-12-13T07:33:57,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741990_1166 (size=126803) 2024-12-13T07:33:57,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741990_1166 (size=126803) 2024-12-13T07:33:57,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741990_1166 (size=126803) 2024-12-13T07:33:57,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741991_1167 (size=322274) 2024-12-13T07:33:57,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741991_1167 (size=322274) 2024-12-13T07:33:57,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741991_1167 (size=322274) 2024-12-13T07:33:57,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741992_1168 (size=6350899) 2024-12-13T07:33:57,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741992_1168 (size=6350899) 2024-12-13T07:33:57,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741992_1168 (size=6350899) 2024-12-13T07:33:57,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741993_1169 (size=1832290) 2024-12-13T07:33:57,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741993_1169 (size=1832290) 2024-12-13T07:33:57,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741993_1169 (size=1832290) 2024-12-13T07:33:57,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741994_1170 (size=30081) 2024-12-13T07:33:57,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to 
blk_1073741994_1170 (size=30081) 2024-12-13T07:33:57,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741994_1170 (size=30081) 2024-12-13T07:33:57,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741995_1171 (size=53616) 2024-12-13T07:33:57,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741995_1171 (size=53616) 2024-12-13T07:33:57,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741995_1171 (size=53616) 2024-12-13T07:33:57,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741996_1172 (size=29229) 2024-12-13T07:33:57,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741996_1172 (size=29229) 2024-12-13T07:33:57,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741996_1172 (size=29229) 2024-12-13T07:33:57,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741997_1173 (size=169089) 2024-12-13T07:33:57,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741997_1173 (size=169089) 2024-12-13T07:33:57,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741997_1173 (size=169089) 2024-12-13T07:33:57,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741998_1174 (size=5175431) 2024-12-13T07:33:57,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741998_1174 (size=5175431) 2024-12-13T07:33:57,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741998_1174 (size=5175431) 2024-12-13T07:33:57,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741999_1175 (size=136454) 2024-12-13T07:33:57,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741999_1175 (size=136454) 2024-12-13T07:33:57,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741999_1175 (size=136454) 2024-12-13T07:33:57,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742000_1176 (size=907854) 2024-12-13T07:33:57,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742000_1176 (size=907854) 2024-12-13T07:33:57,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742000_1176 (size=907854) 2024-12-13T07:33:57,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to 
blk_1073742001_1177 (size=3317408) 2024-12-13T07:33:57,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742001_1177 (size=3317408) 2024-12-13T07:33:57,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742001_1177 (size=3317408) 2024-12-13T07:33:57,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742002_1178 (size=503880) 2024-12-13T07:33:57,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742002_1178 (size=503880) 2024-12-13T07:33:57,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742002_1178 (size=503880) 2024-12-13T07:33:58,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742003_1179 (size=4695811) 2024-12-13T07:33:58,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742003_1179 (size=4695811) 2024-12-13T07:33:58,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742003_1179 (size=4695811) 2024-12-13T07:33:58,024 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-13T07:33:58,027 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-13T07:33:58,030 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T07:33:58,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742004_1180 (size=344) 2024-12-13T07:33:58,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742004_1180 (size=344) 2024-12-13T07:33:58,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742004_1180 (size=344) 2024-12-13T07:33:58,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742005_1181 (size=15) 2024-12-13T07:33:58,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742005_1181 (size=15) 2024-12-13T07:33:58,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742005_1181 (size=15) 2024-12-13T07:33:58,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742006_1182 (size=304891) 2024-12-13T07:33:58,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742006_1182 (size=304891) 2024-12-13T07:33:58,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742006_1182 (size=304891) 2024-12-13T07:33:58,140 WARN [SchedulerEventDispatcher:Event Processor {}] 
capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:33:58,140 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:33:58,143 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0002_000001 (auth:SIMPLE) from 127.0.0.1:50766 2024-12-13T07:33:58,158 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0002/container_1734075189398_0002_01_000001/launch_container.sh] 2024-12-13T07:33:58,158 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0002/container_1734075189398_0002_01_000001/container_tokens] 2024-12-13T07:33:58,158 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0002/container_1734075189398_0002_01_000001/sysfs] 2024-12-13T07:33:58,252 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0003_000001 (auth:SIMPLE) from 127.0.0.1:42116 2024-12-13T07:33:59,589 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:34:00,630 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
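Two of the warnings above are worth a note. The JobResourceUploader warning ("No job jar file set. User classes may not be found. See Job or Job#setJar(String).") is the standard MapReduce hint that the submitter never told the job which jar carries its classes; the AbstractLeafQueue warnings record that the mini cluster's yarn.scheduler.capacity.maximum-am-resource-percent is too small to admit an application master, so enforcement is skipped. A minimal, hypothetical way to address the first warning, assuming a driver class named ExportDriver, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class ExportDriver {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "export-snapshot-copy");
    // Point the uploader at the jar containing the user classes.
    job.setJarByClass(ExportDriver.class);
    // job.setJar("/path/to/job.jar");  // the variant named in the warning
  }
}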
2024-12-13T07:34:02,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-13T07:34:02,021 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-13T07:34:02,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-13T07:34:02,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-13T07:34:03,354 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0003_000001 (auth:SIMPLE) from 127.0.0.1:59978 2024-12-13T07:34:03,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742007_1183 (size=350565) 2024-12-13T07:34:03,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742007_1183 (size=350565) 2024-12-13T07:34:03,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742007_1183 (size=350565) 2024-12-13T07:34:05,345 DEBUG [master/9eb75de94ef3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1798734e2793477685c52953afdcb1c8 changed from -1.0 to 0.0, refreshing cache 2024-12-13T07:34:05,345 DEBUG [master/9eb75de94ef3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 4825f6265c91ad571de6370b4fecbf02 changed from -1.0 to 0.0, refreshing cache 2024-12-13T07:34:05,346 DEBUG [master/9eb75de94ef3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 4ee0c6377199ce786a0b82a4e00c172f changed from -1.0 to 0.0, refreshing cache 2024-12-13T07:34:05,346 DEBUG [master/9eb75de94ef3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region c211b11636a7ab2928dd936a1bcf7e13 changed from -1.0 to 0.0, refreshing cache 2024-12-13T07:34:05,653 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0003_000001 (auth:SIMPLE) from 127.0.0.1:58068 2024-12-13T07:34:07,525 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:34:09,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742008_1184 (size=8462) 2024-12-13T07:34:09,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742008_1184 (size=8462) 2024-12-13T07:34:09,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742008_1184 (size=8462) 2024-12-13T07:34:09,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742009_1185 (size=5149) 2024-12-13T07:34:09,659 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742009_1185 (size=5149) 2024-12-13T07:34:09,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742009_1185 (size=5149) 2024-12-13T07:34:09,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742010_1186 (size=17422) 2024-12-13T07:34:09,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742010_1186 (size=17422) 2024-12-13T07:34:09,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742010_1186 (size=17422) 2024-12-13T07:34:09,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742011_1187 (size=465) 2024-12-13T07:34:09,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742011_1187 (size=465) 2024-12-13T07:34:09,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742011_1187 (size=465) 2024-12-13T07:34:09,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742012_1188 (size=17422) 2024-12-13T07:34:09,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742012_1188 (size=17422) 2024-12-13T07:34:09,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742012_1188 (size=17422) 2024-12-13T07:34:09,834 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0003/container_1734075189398_0003_01_000002/launch_container.sh] 2024-12-13T07:34:09,834 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0003/container_1734075189398_0003_01_000002/container_tokens] 2024-12-13T07:34:09,835 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0003/container_1734075189398_0003_01_000002/sysfs] 2024-12-13T07:34:09,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to 
blk_1073742013_1189 (size=350565) 2024-12-13T07:34:09,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742013_1189 (size=350565) 2024-12-13T07:34:09,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742013_1189 (size=350565) 2024-12-13T07:34:09,877 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0003_000001 (auth:SIMPLE) from 127.0.0.1:36654 2024-12-13T07:34:11,239 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T07:34:11,240 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-13T07:34:11,251 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemState 2024-12-13T07:34:11,251 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T07:34:11,252 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T07:34:11,252 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-13T07:34:11,256 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-13T07:34:11,256 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-13T07:34:11,256 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075236136/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075236136/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-13T07:34:11,257 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075236136/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-13T07:34:11,257 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075236136/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-13T07:34:11,269 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemState 2024-12-13T07:34:11,269 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-13T07:34:11,270 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=74, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-13T07:34:11,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-13T07:34:11,273 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075251273"}]},"ts":"1734075251273"} 2024-12-13T07:34:11,274 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-13T07:34:11,318 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-13T07:34:11,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-13T07:34:11,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c211b11636a7ab2928dd936a1bcf7e13, UNASSIGN}, {pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1798734e2793477685c52953afdcb1c8, UNASSIGN}] 2024-12-13T07:34:11,320 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1798734e2793477685c52953afdcb1c8, UNASSIGN 2024-12-13T07:34:11,320 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c211b11636a7ab2928dd936a1bcf7e13, UNASSIGN 2024-12-13T07:34:11,321 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=c211b11636a7ab2928dd936a1bcf7e13, regionState=CLOSING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:34:11,321 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=1798734e2793477685c52953afdcb1c8, regionState=CLOSING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:11,322 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:34:11,322 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; CloseRegionProcedure 1798734e2793477685c52953afdcb1c8, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:34:11,323 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:34:11,323 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=76, state=RUNNABLE; CloseRegionProcedure c211b11636a7ab2928dd936a1bcf7e13, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:34:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-13T07:34:11,474 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:34:11,474 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:11,475 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(124): Close c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:34:11,475 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(124): Close 1798734e2793477685c52953afdcb1c8 2024-12-13T07:34:11,475 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:34:11,475 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:34:11,475 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1681): Closing c211b11636a7ab2928dd936a1bcf7e13, disabling compactions & flushes 2024-12-13T07:34:11,475 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1681): Closing 1798734e2793477685c52953afdcb1c8, disabling compactions & flushes 2024-12-13T07:34:11,475 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:34:11,475 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 2024-12-13T07:34:11,475 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 2024-12-13T07:34:11,475 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:34:11,475 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. after waiting 0 ms 2024-12-13T07:34:11,475 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. after waiting 0 ms 2024-12-13T07:34:11,475 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 
2024-12-13T07:34:11,475 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 2024-12-13T07:34:11,482 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:34:11,482 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:34:11,483 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:34:11,483 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:34:11,483 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13. 2024-12-13T07:34:11,483 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1635): Region close journal for c211b11636a7ab2928dd936a1bcf7e13: 2024-12-13T07:34:11,483 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8. 
2024-12-13T07:34:11,483 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1635): Region close journal for 1798734e2793477685c52953afdcb1c8: 2024-12-13T07:34:11,485 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(170): Closed c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:34:11,485 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=c211b11636a7ab2928dd936a1bcf7e13, regionState=CLOSED 2024-12-13T07:34:11,485 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(170): Closed 1798734e2793477685c52953afdcb1c8 2024-12-13T07:34:11,486 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=1798734e2793477685c52953afdcb1c8, regionState=CLOSED 2024-12-13T07:34:11,488 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=76 2024-12-13T07:34:11,488 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=76, state=SUCCESS; CloseRegionProcedure c211b11636a7ab2928dd936a1bcf7e13, server=9eb75de94ef3,42189,1734075182526 in 164 msec 2024-12-13T07:34:11,489 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-13T07:34:11,489 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; CloseRegionProcedure 1798734e2793477685c52953afdcb1c8, server=9eb75de94ef3,36021,1734075182342 in 166 msec 2024-12-13T07:34:11,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c211b11636a7ab2928dd936a1bcf7e13, UNASSIGN in 169 msec 2024-12-13T07:34:11,490 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=77, resume processing ppid=75 2024-12-13T07:34:11,490 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1798734e2793477685c52953afdcb1c8, UNASSIGN in 169 msec 2024-12-13T07:34:11,492 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-12-13T07:34:11,492 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 173 msec 2024-12-13T07:34:11,493 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075251493"}]},"ts":"1734075251493"} 2024-12-13T07:34:11,494 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-13T07:34:11,504 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-13T07:34:11,506 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemState in 236 msec 2024-12-13T07:34:11,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-13T07:34:11,576 INFO 
[Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState, procId: 74 completed 2024-12-13T07:34:11,577 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-13T07:34:11,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T07:34:11,579 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T07:34:11,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-13T07:34:11,580 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=80, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T07:34:11,581 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-13T07:34:11,583 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:34:11,583 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8 2024-12-13T07:34:11,585 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/recovered.edits] 2024-12-13T07:34:11,585 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/recovered.edits] 2024-12-13T07:34:11,588 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/cf/58b50c1a74164c718f58f7a9439cd908 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/cf/58b50c1a74164c718f58f7a9439cd908 2024-12-13T07:34:11,588 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/cf/5fc9bdeb3cb142eb9daa7b8071b6f5f1 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/cf/5fc9bdeb3cb142eb9daa7b8071b6f5f1 2024-12-13T07:34:11,591 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8/recovered.edits/9.seqid 2024-12-13T07:34:11,591 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13/recovered.edits/9.seqid 2024-12-13T07:34:11,592 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/1798734e2793477685c52953afdcb1c8 2024-12-13T07:34:11,592 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemState/c211b11636a7ab2928dd936a1bcf7e13 2024-12-13T07:34:11,592 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-13T07:34:11,594 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=80, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T07:34:11,597 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-13T07:34:11,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T07:34:11,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T07:34:11,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T07:34:11,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T07:34:11,599 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-13T07:34:11,599 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-13T07:34:11,600 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-13T07:34:11,600 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-13T07:34:11,601 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-13T07:34:11,602 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=80, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T07:34:11,602 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemState' from region states. 2024-12-13T07:34:11,602 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075251602"}]},"ts":"9223372036854775807"} 2024-12-13T07:34:11,602 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075251602"}]},"ts":"9223372036854775807"} 2024-12-13T07:34:11,605 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T07:34:11,605 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c211b11636a7ab2928dd936a1bcf7e13, NAME => 'testtb-testExportFileSystemState,,1734075234764.c211b11636a7ab2928dd936a1bcf7e13.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 1798734e2793477685c52953afdcb1c8, NAME => 'testtb-testExportFileSystemState,1,1734075234764.1798734e2793477685c52953afdcb1c8.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T07:34:11,605 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemState' as deleted. 
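The entries above trace the master-side DeleteTableProcedure (pid=80) end to end: the two region directories are copied into the archive location, their rows are removed from hbase:meta, and the /hbase/acl znode plus the cached permissions for the table are cleared on every watcher. On the client side this whole sequence is normally driven by just two Admin calls, a disable followed by a delete, matching procIds 74 and 80 above. The following is a minimal sketch and not the test's actual code; the class name DropTableSketch is invented here, and it assumes an hbase-site.xml for this mini-cluster (quorum 127.0.0.1:59810 in this log) is on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        // Assumption: configuration pointing at the cluster under test.
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            admin.disableTable(table); // DISABLE operation, procId 74 in the log above
            admin.deleteTable(table);  // DeleteTableProcedure, pid=80: archive regions, clean meta, drop ACLs
          }
        }
      }
    }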
2024-12-13T07:34:11,605 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734075251605"}]},"ts":"9223372036854775807"} 2024-12-13T07:34:11,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T07:34:11,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T07:34:11,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:11,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:11,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T07:34:11,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T07:34:11,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:11,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:11,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-13T07:34:11,608 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemState state from META 2024-12-13T07:34:11,615 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=80, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T07:34:11,616 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemState in 38 msec 2024-12-13T07:34:11,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-13T07:34:11,715 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState, procId: 80 completed 2024-12-13T07:34:11,730 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" 2024-12-13T07:34:11,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-13T07:34:11,736 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" 2024-12-13T07:34:11,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-13T07:34:11,776 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=792 (was 797), OpenFileDescriptor=817 (was 811) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=589 (was 612), ProcessCount=20 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=6857 (was 7282) 2024-12-13T07:34:11,776 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-12-13T07:34:11,800 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=792, OpenFileDescriptor=817, MaxFileDescriptor=1048576, SystemLoadAverage=589, ProcessCount=20, AvailableMemoryMB=6856 2024-12-13T07:34:11,800 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-12-13T07:34:11,803 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:34:11,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-13T07:34:11,808 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:34:11,809 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:11,809 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 81 2024-12-13T07:34:11,810 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:34:11,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T07:34:11,830 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742014_1190 (size=404) 2024-12-13T07:34:11,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742014_1190 (size=404) 2024-12-13T07:34:11,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742014_1190 (size=404) 2024-12-13T07:34:11,835 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f6a1d6d458cf0eb73804984414865f1a, NAME => 'testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:34:11,840 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => afb548b9703253e86c3615e776d3eca7, NAME => 'testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:34:11,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742015_1191 (size=65) 2024-12-13T07:34:11,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742015_1191 (size=65) 2024-12-13T07:34:11,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:11,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1681): Closing f6a1d6d458cf0eb73804984414865f1a, disabling compactions & flushes 2024-12-13T07:34:11,872 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:11,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 
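At 07:34:11,803 the master receives the create request for 'testtb-testConsecutiveExports' with a single 'cf' family (VERSIONS => '1', BLOCKSIZE => 64KB), and the CreateTableProcedure (pid=81) then writes the filesystem layout for two regions split at row key '1', as the RegionOpenAndInit entries above show. A hedged sketch of the equivalent client call follows; it reuses an Admin obtained as in the earlier sketch, and the helper name is invented for illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class CreateTableSketch {
      static void createConsecutiveExportsTable(Admin admin) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)   // VERSIONS => '1' in the logged create request
                .setBlocksize(65536) // BLOCKSIZE => '65536 B (64KB)'
                .build())
            .build();
        // A single split key of '1' yields the two regions seen in the log:
        // [,1) -> f6a1d6d458cf0eb73804984414865f1a and [1,) -> afb548b9703253e86c3615e776d3eca7
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      }
    }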
2024-12-13T07:34:11,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. after waiting 0 ms 2024-12-13T07:34:11,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:11,872 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:11,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1635): Region close journal for f6a1d6d458cf0eb73804984414865f1a: 2024-12-13T07:34:11,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742015_1191 (size=65) 2024-12-13T07:34:11,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742016_1192 (size=65) 2024-12-13T07:34:11,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742016_1192 (size=65) 2024-12-13T07:34:11,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742016_1192 (size=65) 2024-12-13T07:34:11,887 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:11,887 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1681): Closing afb548b9703253e86c3615e776d3eca7, disabling compactions & flushes 2024-12-13T07:34:11,887 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 2024-12-13T07:34:11,887 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 2024-12-13T07:34:11,887 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. after waiting 0 ms 2024-12-13T07:34:11,887 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 2024-12-13T07:34:11,887 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 
2024-12-13T07:34:11,887 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1635): Region close journal for afb548b9703253e86c3615e776d3eca7: 2024-12-13T07:34:11,888 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:34:11,889 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734075251888"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075251888"}]},"ts":"1734075251888"} 2024-12-13T07:34:11,889 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734075251888"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075251888"}]},"ts":"1734075251888"} 2024-12-13T07:34:11,892 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T07:34:11,893 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:34:11,893 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075251893"}]},"ts":"1734075251893"} 2024-12-13T07:34:11,895 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-13T07:34:11,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T07:34:11,914 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:34:11,916 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:34:11,916 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:34:11,916 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:34:11,916 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:34:11,916 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:34:11,916 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:34:11,916 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:34:11,916 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6a1d6d458cf0eb73804984414865f1a, ASSIGN}, {pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afb548b9703253e86c3615e776d3eca7, ASSIGN}] 2024-12-13T07:34:11,917 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afb548b9703253e86c3615e776d3eca7, ASSIGN 2024-12-13T07:34:11,917 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6a1d6d458cf0eb73804984414865f1a, ASSIGN 2024-12-13T07:34:11,918 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afb548b9703253e86c3615e776d3eca7, ASSIGN; state=OFFLINE, location=9eb75de94ef3,36021,1734075182342; forceNewPlan=false, retain=false 2024-12-13T07:34:11,918 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6a1d6d458cf0eb73804984414865f1a, ASSIGN; state=OFFLINE, location=9eb75de94ef3,41895,1734075182430; forceNewPlan=false, retain=false 2024-12-13T07:34:12,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-13T07:34:12,068 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:34:12,068 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=afb548b9703253e86c3615e776d3eca7, regionState=OPENING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:12,068 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=f6a1d6d458cf0eb73804984414865f1a, regionState=OPENING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:12,070 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; OpenRegionProcedure afb548b9703253e86c3615e776d3eca7, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:34:12,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=82, state=RUNNABLE; OpenRegionProcedure f6a1d6d458cf0eb73804984414865f1a, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:34:12,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T07:34:12,222 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:12,222 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:12,225 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:12,225 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 
2024-12-13T07:34:12,225 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7285): Opening region: {ENCODED => f6a1d6d458cf0eb73804984414865f1a, NAME => 'testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T07:34:12,225 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7285): Opening region: {ENCODED => afb548b9703253e86c3615e776d3eca7, NAME => 'testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T07:34:12,225 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. service=AccessControlService 2024-12-13T07:34:12,226 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. service=AccessControlService 2024-12-13T07:34:12,226 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:34:12,226 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:34:12,226 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:12,226 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:12,226 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:12,226 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7327): checking encryption for afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:12,226 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:12,226 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7330): checking classloading for afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:12,226 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7327): checking 
encryption for f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:12,226 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7330): checking classloading for f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:12,228 INFO [StoreOpener-f6a1d6d458cf0eb73804984414865f1a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:12,228 INFO [StoreOpener-afb548b9703253e86c3615e776d3eca7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:12,229 INFO [StoreOpener-afb548b9703253e86c3615e776d3eca7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region afb548b9703253e86c3615e776d3eca7 columnFamilyName cf 2024-12-13T07:34:12,229 INFO [StoreOpener-f6a1d6d458cf0eb73804984414865f1a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f6a1d6d458cf0eb73804984414865f1a columnFamilyName cf 2024-12-13T07:34:12,229 DEBUG [StoreOpener-f6a1d6d458cf0eb73804984414865f1a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:12,229 DEBUG [StoreOpener-afb548b9703253e86c3615e776d3eca7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:12,230 INFO [StoreOpener-afb548b9703253e86c3615e776d3eca7-1 {}] regionserver.HStore(327): Store=afb548b9703253e86c3615e776d3eca7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:34:12,230 INFO [StoreOpener-f6a1d6d458cf0eb73804984414865f1a-1 {}] regionserver.HStore(327): Store=f6a1d6d458cf0eb73804984414865f1a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:34:12,231 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:12,231 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:12,231 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:12,232 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:12,234 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1085): writing seq id for afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:12,234 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1085): writing seq id for f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:12,236 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:34:12,237 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:34:12,237 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1102): Opened afb548b9703253e86c3615e776d3eca7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70948777, jitterRate=0.057219162583351135}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:34:12,238 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1102): Opened f6a1d6d458cf0eb73804984414865f1a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72835850, jitterRate=0.08533874154090881}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:34:12,238 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] 
regionserver.HRegion(1001): Region open journal for afb548b9703253e86c3615e776d3eca7: 2024-12-13T07:34:12,238 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1001): Region open journal for f6a1d6d458cf0eb73804984414865f1a: 2024-12-13T07:34:12,239 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7., pid=84, masterSystemTime=1734075252222 2024-12-13T07:34:12,239 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a., pid=85, masterSystemTime=1734075252222 2024-12-13T07:34:12,242 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:12,243 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:12,244 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=f6a1d6d458cf0eb73804984414865f1a, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:12,244 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 2024-12-13T07:34:12,244 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 
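At this point both OpenRegionProcedure children have finished their post-open deploy tasks: f6a1d6d458cf0eb73804984414865f1a is marked OPEN on 9eb75de94ef3,41895 and afb548b9703253e86c3615e776d3eca7 is about to be marked OPEN on 9eb75de94ef3,36021. If a client wants to confirm the assignment before writing (the test utility's own "Waiting until all regions ... get assigned" check appears a little further below), something like the following hedged sketch works; the class and method names are invented, and an existing Connection/Admin is assumed.

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    class AssignmentCheckSketch {
      static void waitForAssignment(Connection conn, Admin admin) throws Exception {
        TableName table = TableName.valueOf("testtb-testConsecutiveExports");
        while (!admin.isTableAvailable(table)) { // true once every region of the table is open
          Thread.sleep(100);
        }
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }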
2024-12-13T07:34:12,245 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=afb548b9703253e86c3615e776d3eca7, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:12,249 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=82 2024-12-13T07:34:12,250 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=82, state=SUCCESS; OpenRegionProcedure f6a1d6d458cf0eb73804984414865f1a, server=9eb75de94ef3,41895,1734075182430 in 176 msec 2024-12-13T07:34:12,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-13T07:34:12,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; OpenRegionProcedure afb548b9703253e86c3615e776d3eca7, server=9eb75de94ef3,36021,1734075182342 in 178 msec 2024-12-13T07:34:12,251 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6a1d6d458cf0eb73804984414865f1a, ASSIGN in 333 msec 2024-12-13T07:34:12,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=83, resume processing ppid=81 2024-12-13T07:34:12,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afb548b9703253e86c3615e776d3eca7, ASSIGN in 334 msec 2024-12-13T07:34:12,252 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:34:12,252 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075252252"}]},"ts":"1734075252252"} 2024-12-13T07:34:12,254 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-13T07:34:12,265 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:34:12,266 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-13T07:34:12,269 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-13T07:34:12,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:12,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:12,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:12,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:12,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:12,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:12,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:12,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:12,350 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; CreateTableProcedure table=testtb-testConsecutiveExports in 545 msec 2024-12-13T07:34:12,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T07:34:12,415 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports, procId: 81 completed 2024-12-13T07:34:12,415 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-13T07:34:12,416 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:34:12,419 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-13T07:34:12,420 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:34:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testConsecutiveExports assigned. 2024-12-13T07:34:12,423 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-13T07:34:12,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075252423 (current time:1734075252423). 
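The two delete requests at 07:34:11,730 and 07:34:11,736 remove the snapshots left over from the previous test, and the request at 07:34:12,423 asks for a FLUSH-type snapshot named emptySnaptb0-testConsecutiveExports of the newly created (still empty) table. At the client these map onto Admin.deleteSnapshot and Admin.snapshot; a minimal sketch, again reusing an Admin as above and using the names taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    class SnapshotSketch {
      static void cleanupAndSnapshot(Admin admin) throws Exception {
        // Matches the delete requests logged at 07:34:11,730 and 07:34:11,736.
        admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
        admin.deleteSnapshot("snaptb0-testExportFileSystemState");
        // Matches the snapshot request logged at 07:34:12,423; for an enabled
        // table this takes an online snapshot, shown as type=FLUSH in the log.
        admin.snapshot("emptySnaptb0-testConsecutiveExports",
            TableName.valueOf("testtb-testConsecutiveExports"));
      }
    }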
2024-12-13T07:34:12,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:34:12,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-13T07:34:12,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:34:12,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e10cbf5 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@33e410e8 2024-12-13T07:34:12,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6980e413, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:34:12,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:12,434 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38888, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:12,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e10cbf5 to 127.0.0.1:59810 2024-12-13T07:34:12,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:34:12,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x465b27f5 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3946d51f 2024-12-13T07:34:12,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57081e58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:34:12,457 DEBUG [hconnection-0x2bd0a230-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:12,458 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38902, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:12,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:12,461 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53416, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:12,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x465b27f5 to 127.0.0.1:59810 2024-12-13T07:34:12,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:34:12,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-13T07:34:12,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:34:12,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-13T07:34:12,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-13T07:34:12,465 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:34:12,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-13T07:34:12,466 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:34:12,468 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:34:12,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742017_1193 (size=161) 2024-12-13T07:34:12,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742017_1193 (size=161) 2024-12-13T07:34:12,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742017_1193 (size=161) 2024-12-13T07:34:12,476 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:34:12,476 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure f6a1d6d458cf0eb73804984414865f1a}, {pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure afb548b9703253e86c3615e776d3eca7}] 
2024-12-13T07:34:12,477 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:12,477 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:12,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-13T07:34:12,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:12,628 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:12,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=88 2024-12-13T07:34:12,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=87 2024-12-13T07:34:12,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 2024-12-13T07:34:12,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:12,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for afb548b9703253e86c3615e776d3eca7: 2024-12-13T07:34:12,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.HRegion(2538): Flush status journal for f6a1d6d458cf0eb73804984414865f1a: 2024-12-13T07:34:12,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. for emptySnaptb0-testConsecutiveExports completed. 2024-12-13T07:34:12,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. for emptySnaptb0-testConsecutiveExports completed. 2024-12-13T07:34:12,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-13T07:34:12,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-13T07:34:12,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:34:12,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:34:12,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:34:12,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:34:12,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742019_1195 (size=68) 2024-12-13T07:34:12,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742019_1195 (size=68) 2024-12-13T07:34:12,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742019_1195 (size=68) 2024-12-13T07:34:12,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:12,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=87 2024-12-13T07:34:12,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=87 2024-12-13T07:34:12,657 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:12,657 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:12,660 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; SnapshotRegionProcedure f6a1d6d458cf0eb73804984414865f1a in 182 msec 2024-12-13T07:34:12,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742018_1194 (size=68) 2024-12-13T07:34:12,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742018_1194 (size=68) 2024-12-13T07:34:12,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742018_1194 (size=68) 2024-12-13T07:34:12,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 
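Both SnapshotRegionProcedure children record empty manifests here ("Adding snapshot references for [] hfiles") because no data has been written to testtb-testConsecutiveExports yet; the master then consolidates the manifests under the .hbase-snapshot/.tmp directory and verifies them, as the following entries show. The TestSecureExportSnapshot tests named in the ResourceChecker lines go on to export such snapshots with org.apache.hadoop.hbase.snapshot.ExportSnapshot (typically run as 'hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <destination>'). Before exporting, it can be useful to confirm the snapshot is registered with the master; a hedged sketch with an invented helper name:

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    class SnapshotListSketch {
      static boolean snapshotExists(Admin admin, String name) throws Exception {
        for (SnapshotDescription sd : admin.listSnapshots()) { // completed snapshots known to the master
          if (name.equals(sd.getName())) {
            return true;
          }
        }
        return false;
      }
    }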
2024-12-13T07:34:12,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-13T07:34:12,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-13T07:34:12,662 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:12,663 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:12,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=86 2024-12-13T07:34:12,665 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:34:12,665 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=86, state=SUCCESS; SnapshotRegionProcedure afb548b9703253e86c3615e776d3eca7 in 187 msec 2024-12-13T07:34:12,665 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:34:12,666 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:34:12,666 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-13T07:34:12,666 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-13T07:34:12,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742020_1196 (size=543) 2024-12-13T07:34:12,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742020_1196 (size=543) 2024-12-13T07:34:12,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742020_1196 (size=543) 2024-12-13T07:34:12,708 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:34:12,714 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): 
pid=86, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:34:12,715 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-13T07:34:12,718 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:34:12,718 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-13T07:34:12,720 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 254 msec 2024-12-13T07:34:12,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-13T07:34:12,768 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 86 completed 2024-12-13T07:34:12,779 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41895 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:34:12,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:34:12,784 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testConsecutiveExports 2024-12-13T07:34:12,784 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:12,784 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:34:12,795 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-13T07:34:12,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075252795 (current time:1734075252795). 
2024-12-13T07:34:12,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:34:12,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-13T07:34:12,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:34:12,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c27ba68 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2484cfe 2024-12-13T07:34:14,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cec4a4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:34:14,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:14,619 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38908, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:14,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c27ba68 to 127.0.0.1:59810 2024-12-13T07:34:14,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:34:14,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12e0ee92 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@528f7f8c 2024-12-13T07:34:14,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c3fd984, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:34:14,879 DEBUG [hconnection-0x7095a403-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:14,880 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38910, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:14,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:14,883 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53418, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:14,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x12e0ee92 to 127.0.0.1:59810 2024-12-13T07:34:14,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:34:14,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-13T07:34:14,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:34:14,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-13T07:34:14,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-13T07:34:14,888 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:34:14,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T07:34:14,889 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:34:14,892 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:34:14,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742021_1197 (size=156) 2024-12-13T07:34:14,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742021_1197 (size=156) 2024-12-13T07:34:14,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742021_1197 (size=156) 2024-12-13T07:34:14,908 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:34:14,908 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure f6a1d6d458cf0eb73804984414865f1a}, {pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure afb548b9703253e86c3615e776d3eca7}] 2024-12-13T07:34:14,908 INFO [PEWorker-5 
{}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:14,909 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:14,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T07:34:15,059 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:15,059 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:15,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=90 2024-12-13T07:34:15,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=91 2024-12-13T07:34:15,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:15,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 
2024-12-13T07:34:15,060 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing f6a1d6d458cf0eb73804984414865f1a 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-13T07:34:15,061 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing afb548b9703253e86c3615e776d3eca7 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-13T07:34:15,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/.tmp/cf/e23efac893cd487f80b88a05f6d8112c is 71, key is 032880ba53b7bc5b128f21c3d3b9a4e7/cf:q/1734075252779/Put/seqid=0 2024-12-13T07:34:15,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742022_1198 (size=5286) 2024-12-13T07:34:15,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742022_1198 (size=5286) 2024-12-13T07:34:15,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742022_1198 (size=5286) 2024-12-13T07:34:15,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/.tmp/cf/3dd27ff2bc204e298ac5eb469b5ad8b9 is 71, key is 218b52aaa23e65ea1adf4367be3c9fb8/cf:q/1734075252780/Put/seqid=0 2024-12-13T07:34:15,083 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/.tmp/cf/e23efac893cd487f80b88a05f6d8112c 2024-12-13T07:34:15,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742023_1199 (size=8326) 2024-12-13T07:34:15,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742023_1199 (size=8326) 2024-12-13T07:34:15,088 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/.tmp/cf/3dd27ff2bc204e298ac5eb469b5ad8b9 2024-12-13T07:34:15,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/.tmp/cf/e23efac893cd487f80b88a05f6d8112c as 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/cf/e23efac893cd487f80b88a05f6d8112c 2024-12-13T07:34:15,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742023_1199 (size=8326) 2024-12-13T07:34:15,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/.tmp/cf/3dd27ff2bc204e298ac5eb469b5ad8b9 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/cf/3dd27ff2bc204e298ac5eb469b5ad8b9 2024-12-13T07:34:15,095 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/cf/e23efac893cd487f80b88a05f6d8112c, entries=3, sequenceid=6, filesize=5.2 K 2024-12-13T07:34:15,096 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for f6a1d6d458cf0eb73804984414865f1a in 36ms, sequenceid=6, compaction requested=false 2024-12-13T07:34:15,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-13T07:34:15,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for f6a1d6d458cf0eb73804984414865f1a: 2024-12-13T07:34:15,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. for snaptb0-testConsecutiveExports completed. 2024-12-13T07:34:15,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-13T07:34:15,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:34:15,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/cf/e23efac893cd487f80b88a05f6d8112c] hfiles 2024-12-13T07:34:15,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/cf/e23efac893cd487f80b88a05f6d8112c for snapshot=snaptb0-testConsecutiveExports 2024-12-13T07:34:15,101 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/cf/3dd27ff2bc204e298ac5eb469b5ad8b9, entries=47, sequenceid=6, filesize=8.1 K 2024-12-13T07:34:15,102 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for afb548b9703253e86c3615e776d3eca7 in 42ms, sequenceid=6, compaction requested=false 2024-12-13T07:34:15,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for afb548b9703253e86c3615e776d3eca7: 2024-12-13T07:34:15,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. for snaptb0-testConsecutiveExports completed. 2024-12-13T07:34:15,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-13T07:34:15,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:34:15,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/cf/3dd27ff2bc204e298ac5eb469b5ad8b9] hfiles 2024-12-13T07:34:15,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/cf/3dd27ff2bc204e298ac5eb469b5ad8b9 for snapshot=snaptb0-testConsecutiveExports 2024-12-13T07:34:15,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742024_1200 (size=107) 2024-12-13T07:34:15,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742024_1200 (size=107) 2024-12-13T07:34:15,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742024_1200 (size=107) 2024-12-13T07:34:15,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 
2024-12-13T07:34:15,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-13T07:34:15,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-13T07:34:15,106 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:15,106 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:15,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; SnapshotRegionProcedure f6a1d6d458cf0eb73804984414865f1a in 199 msec 2024-12-13T07:34:15,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742025_1201 (size=107) 2024-12-13T07:34:15,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742025_1201 (size=107) 2024-12-13T07:34:15,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742025_1201 (size=107) 2024-12-13T07:34:15,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 2024-12-13T07:34:15,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91 2024-12-13T07:34:15,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=91 2024-12-13T07:34:15,114 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:15,114 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:15,117 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=89 2024-12-13T07:34:15,117 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:34:15,117 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=89, state=SUCCESS; SnapshotRegionProcedure afb548b9703253e86c3615e776d3eca7 in 207 msec 2024-12-13T07:34:15,117 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:34:15,118 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:34:15,118 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-13T07:34:15,118 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-13T07:34:15,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742026_1202 (size=621) 2024-12-13T07:34:15,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742026_1202 (size=621) 2024-12-13T07:34:15,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742026_1202 (size=621) 2024-12-13T07:34:15,129 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:34:15,135 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:34:15,135 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-13T07:34:15,137 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:34:15,137 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-13T07:34:15,138 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 251 msec 2024-12-13T07:34:15,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T07:34:15,191 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table 
Name: default:testtb-testConsecutiveExports, procId: 89 completed 2024-12-13T07:34:15,191 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191 2024-12-13T07:34:15,191 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191, srcFsUri=hdfs://localhost:41477, srcDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:34:15,216 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41477, inputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:34:15,217 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@576f0795, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-13T07:34:15,218 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
2024-12-13T07:34:15,221 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-13T07:34:15,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:15,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:15,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:15,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:15,946 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0003/container_1734075189398_0003_01_000001/launch_container.sh] 2024-12-13T07:34:15,947 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0003/container_1734075189398_0003_01_000001/container_tokens] 2024-12-13T07:34:15,947 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0003/container_1734075189398_0003_01_000001/sysfs] 2024-12-13T07:34:15,948 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0003_000001 (auth:SIMPLE) from 127.0.0.1:34330 2024-12-13T07:34:16,052 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-8314959696426725165.jar 2024-12-13T07:34:16,052 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:16,052 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:16,113 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-12885519644574285862.jar 2024-12-13T07:34:16,113 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:16,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:16,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:16,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:16,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:16,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:16,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T07:34:16,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T07:34:16,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T07:34:16,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T07:34:16,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T07:34:16,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T07:34:16,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T07:34:16,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T07:34:16,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T07:34:16,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T07:34:16,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T07:34:16,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T07:34:16,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:16,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:16,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:34:16,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:16,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:16,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:34:16,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:34:16,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742027_1203 (size=127628) 2024-12-13T07:34:16,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742027_1203 (size=127628) 2024-12-13T07:34:16,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742027_1203 (size=127628) 2024-12-13T07:34:16,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742028_1204 (size=2172137) 2024-12-13T07:34:16,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742028_1204 (size=2172137) 2024-12-13T07:34:16,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742028_1204 (size=2172137) 2024-12-13T07:34:16,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742029_1205 (size=213228) 2024-12-13T07:34:16,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742029_1205 (size=213228) 2024-12-13T07:34:16,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742029_1205 (size=213228) 2024-12-13T07:34:16,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742030_1206 (size=451756) 2024-12-13T07:34:16,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742030_1206 (size=451756) 2024-12-13T07:34:16,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40823 is added to blk_1073742030_1206 (size=451756) 2024-12-13T07:34:16,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742031_1207 (size=1877034) 2024-12-13T07:34:16,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742031_1207 (size=1877034) 2024-12-13T07:34:16,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742031_1207 (size=1877034) 2024-12-13T07:34:16,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742032_1208 (size=533455) 2024-12-13T07:34:16,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742032_1208 (size=533455) 2024-12-13T07:34:16,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742032_1208 (size=533455) 2024-12-13T07:34:16,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742033_1209 (size=7280644) 2024-12-13T07:34:16,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742033_1209 (size=7280644) 2024-12-13T07:34:16,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742033_1209 (size=7280644) 2024-12-13T07:34:16,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742034_1210 (size=6350899) 2024-12-13T07:34:16,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742034_1210 (size=6350899) 2024-12-13T07:34:16,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742034_1210 (size=6350899) 2024-12-13T07:34:16,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742035_1211 (size=4188619) 2024-12-13T07:34:16,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742035_1211 (size=4188619) 2024-12-13T07:34:16,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742035_1211 (size=4188619) 2024-12-13T07:34:16,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742036_1212 (size=20406) 2024-12-13T07:34:16,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742036_1212 (size=20406) 2024-12-13T07:34:16,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742036_1212 (size=20406) 2024-12-13T07:34:16,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742037_1213 (size=75495) 2024-12-13T07:34:16,300 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742037_1213 (size=75495) 2024-12-13T07:34:16,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742037_1213 (size=75495) 2024-12-13T07:34:16,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742038_1214 (size=45609) 2024-12-13T07:34:16,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742038_1214 (size=45609) 2024-12-13T07:34:16,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742038_1214 (size=45609) 2024-12-13T07:34:16,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742039_1215 (size=110084) 2024-12-13T07:34:16,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742039_1215 (size=110084) 2024-12-13T07:34:16,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742039_1215 (size=110084) 2024-12-13T07:34:16,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742040_1216 (size=1323991) 2024-12-13T07:34:16,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742040_1216 (size=1323991) 2024-12-13T07:34:16,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742040_1216 (size=1323991) 2024-12-13T07:34:16,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742041_1217 (size=23076) 2024-12-13T07:34:16,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742041_1217 (size=23076) 2024-12-13T07:34:16,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742041_1217 (size=23076) 2024-12-13T07:34:16,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742042_1218 (size=126803) 2024-12-13T07:34:16,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742042_1218 (size=126803) 2024-12-13T07:34:16,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742042_1218 (size=126803) 2024-12-13T07:34:16,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742043_1219 (size=322274) 2024-12-13T07:34:16,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742043_1219 (size=322274) 2024-12-13T07:34:16,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742043_1219 (size=322274) 2024-12-13T07:34:16,356 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742044_1220 (size=1832290) 2024-12-13T07:34:16,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742044_1220 (size=1832290) 2024-12-13T07:34:16,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742044_1220 (size=1832290) 2024-12-13T07:34:16,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742045_1221 (size=30081) 2024-12-13T07:34:16,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742045_1221 (size=30081) 2024-12-13T07:34:16,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742045_1221 (size=30081) 2024-12-13T07:34:16,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742046_1222 (size=53616) 2024-12-13T07:34:16,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742046_1222 (size=53616) 2024-12-13T07:34:16,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742046_1222 (size=53616) 2024-12-13T07:34:16,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742047_1223 (size=29229) 2024-12-13T07:34:16,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742047_1223 (size=29229) 2024-12-13T07:34:16,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742047_1223 (size=29229) 2024-12-13T07:34:16,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742048_1224 (size=169089) 2024-12-13T07:34:16,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742048_1224 (size=169089) 2024-12-13T07:34:16,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742048_1224 (size=169089) 2024-12-13T07:34:16,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742049_1225 (size=5175431) 2024-12-13T07:34:16,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742049_1225 (size=5175431) 2024-12-13T07:34:16,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742049_1225 (size=5175431) 2024-12-13T07:34:16,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742050_1226 (size=136454) 2024-12-13T07:34:16,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742050_1226 (size=136454) 2024-12-13T07:34:16,811 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742050_1226 (size=136454) 2024-12-13T07:34:16,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742051_1227 (size=907854) 2024-12-13T07:34:16,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742051_1227 (size=907854) 2024-12-13T07:34:16,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742051_1227 (size=907854) 2024-12-13T07:34:16,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742052_1228 (size=3317408) 2024-12-13T07:34:16,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742052_1228 (size=3317408) 2024-12-13T07:34:16,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742052_1228 (size=3317408) 2024-12-13T07:34:16,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742053_1229 (size=503880) 2024-12-13T07:34:16,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742053_1229 (size=503880) 2024-12-13T07:34:16,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742053_1229 (size=503880) 2024-12-13T07:34:16,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742054_1230 (size=4695811) 2024-12-13T07:34:16,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742054_1230 (size=4695811) 2024-12-13T07:34:16,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742054_1230 (size=4695811) 2024-12-13T07:34:16,873 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-13T07:34:16,875 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-13T07:34:16,877 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T07:34:16,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742055_1231 (size=338) 2024-12-13T07:34:16,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742055_1231 (size=338) 2024-12-13T07:34:16,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742055_1231 (size=338) 2024-12-13T07:34:16,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742056_1232 (size=15) 2024-12-13T07:34:16,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742056_1232 (size=15) 2024-12-13T07:34:16,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742056_1232 (size=15) 2024-12-13T07:34:16,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742057_1233 (size=304932) 2024-12-13T07:34:16,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742057_1233 (size=304932) 2024-12-13T07:34:16,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742057_1233 (size=304932) 2024-12-13T07:34:16,971 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:34:16,972 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T07:34:16,986 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:34:17,820 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0004_000001 (auth:SIMPLE) from 127.0.0.1:36660 2024-12-13T07:34:22,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-13T07:34:22,021 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-13T07:34:22,837 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0004_000001 (auth:SIMPLE) from 127.0.0.1:55708 2024-12-13T07:34:23,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742058_1234 (size=350606) 2024-12-13T07:34:23,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742058_1234 (size=350606) 2024-12-13T07:34:23,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742058_1234 (size=350606) 2024-12-13T07:34:25,145 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0004_000001 (auth:SIMPLE) from 127.0.0.1:53134 2024-12-13T07:34:29,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742059_1235 (size=17447) 2024-12-13T07:34:29,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742059_1235 (size=17447) 2024-12-13T07:34:29,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742059_1235 (size=17447) 2024-12-13T07:34:29,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742060_1236 (size=462) 2024-12-13T07:34:29,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742060_1236 (size=462) 2024-12-13T07:34:29,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742060_1236 (size=462) 2024-12-13T07:34:29,061 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0004/container_1734075189398_0004_01_000002/launch_container.sh] 2024-12-13T07:34:29,061 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0004/container_1734075189398_0004_01_000002/container_tokens] 2024-12-13T07:34:29,061 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0004/container_1734075189398_0004_01_000002/sysfs] 2024-12-13T07:34:29,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742061_1237 (size=17447) 2024-12-13T07:34:29,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742061_1237 (size=17447) 2024-12-13T07:34:29,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742061_1237 (size=17447) 2024-12-13T07:34:29,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742062_1238 (size=350606) 2024-12-13T07:34:29,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742062_1238 (size=350606) 2024-12-13T07:34:29,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742062_1238 (size=350606) 2024-12-13T07:34:29,101 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0004_000001 (auth:SIMPLE) from 127.0.0.1:53144 2024-12-13T07:34:30,631 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T07:34:31,148 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T07:34:31,148 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-13T07:34:31,151 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-13T07:34:31,151 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T07:34:31,152 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T07:34:31,152 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-13T07:34:31,153 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-13T07:34:31,153 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-13T07:34:31,153 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@576f0795 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-13T07:34:31,153 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-13T07:34:31,153 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-13T07:34:31,155 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191, srcFsUri=hdfs://localhost:41477, srcDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:34:31,194 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41477, inputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:34:31,194 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@576f0795, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-13T07:34:31,197 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T07:34:31,209 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-13T07:34:31,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:31,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:31,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:31,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:32,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-1403463216958616485.jar 2024-12-13T07:34:32,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:32,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:32,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-17039534926210970553.jar 2024-12-13T07:34:32,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:32,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:32,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:32,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:32,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:32,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:32,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T07:34:32,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T07:34:32,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T07:34:32,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T07:34:32,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T07:34:32,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T07:34:32,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T07:34:32,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T07:34:32,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T07:34:32,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T07:34:32,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T07:34:32,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T07:34:32,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:32,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:32,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:34:32,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:32,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:32,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:34:32,316 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:34:32,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742063_1239 (size=127628) 2024-12-13T07:34:32,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742063_1239 (size=127628) 2024-12-13T07:34:32,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742063_1239 (size=127628) 2024-12-13T07:34:32,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742064_1240 (size=2172137) 2024-12-13T07:34:32,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742064_1240 (size=2172137) 2024-12-13T07:34:32,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742064_1240 (size=2172137) 2024-12-13T07:34:32,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742065_1241 (size=213228) 2024-12-13T07:34:32,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742065_1241 (size=213228) 2024-12-13T07:34:32,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742065_1241 (size=213228) 2024-12-13T07:34:32,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742066_1242 (size=1877034) 2024-12-13T07:34:32,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742066_1242 (size=1877034) 2024-12-13T07:34:32,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742066_1242 (size=1877034) 2024-12-13T07:34:32,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742067_1243 (size=533455) 2024-12-13T07:34:32,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742067_1243 (size=533455) 2024-12-13T07:34:32,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742067_1243 (size=533455) 2024-12-13T07:34:32,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742068_1244 (size=7280644) 2024-12-13T07:34:32,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742068_1244 (size=7280644) 2024-12-13T07:34:32,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742068_1244 (size=7280644) 2024-12-13T07:34:32,468 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742069_1245 (size=6350899) 2024-12-13T07:34:32,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742069_1245 (size=6350899) 2024-12-13T07:34:32,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742069_1245 (size=6350899) 2024-12-13T07:34:32,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742070_1246 (size=4188619) 2024-12-13T07:34:32,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742070_1246 (size=4188619) 2024-12-13T07:34:32,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742070_1246 (size=4188619) 2024-12-13T07:34:32,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742071_1247 (size=20406) 2024-12-13T07:34:32,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742071_1247 (size=20406) 2024-12-13T07:34:32,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742071_1247 (size=20406) 2024-12-13T07:34:32,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742072_1248 (size=75495) 2024-12-13T07:34:32,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742072_1248 (size=75495) 2024-12-13T07:34:32,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742072_1248 (size=75495) 2024-12-13T07:34:32,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742073_1249 (size=45609) 2024-12-13T07:34:32,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742073_1249 (size=45609) 2024-12-13T07:34:32,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742073_1249 (size=45609) 2024-12-13T07:34:32,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742074_1250 (size=110084) 2024-12-13T07:34:32,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742074_1250 (size=110084) 2024-12-13T07:34:32,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742074_1250 (size=110084) 2024-12-13T07:34:32,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742075_1251 (size=1323991) 2024-12-13T07:34:32,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742075_1251 (size=1323991) 2024-12-13T07:34:32,517 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742075_1251 (size=1323991) 2024-12-13T07:34:32,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742076_1252 (size=23076) 2024-12-13T07:34:32,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742076_1252 (size=23076) 2024-12-13T07:34:32,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742076_1252 (size=23076) 2024-12-13T07:34:32,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742077_1253 (size=126803) 2024-12-13T07:34:32,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742077_1253 (size=126803) 2024-12-13T07:34:32,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742077_1253 (size=126803) 2024-12-13T07:34:32,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742078_1254 (size=322274) 2024-12-13T07:34:32,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742078_1254 (size=322274) 2024-12-13T07:34:32,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742078_1254 (size=322274) 2024-12-13T07:34:32,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742079_1255 (size=1832290) 2024-12-13T07:34:32,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742079_1255 (size=1832290) 2024-12-13T07:34:32,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742079_1255 (size=1832290) 2024-12-13T07:34:32,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742080_1256 (size=30081) 2024-12-13T07:34:32,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742080_1256 (size=30081) 2024-12-13T07:34:32,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742080_1256 (size=30081) 2024-12-13T07:34:32,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742081_1257 (size=53616) 2024-12-13T07:34:32,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742081_1257 (size=53616) 2024-12-13T07:34:32,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742081_1257 (size=53616) 2024-12-13T07:34:32,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742082_1258 (size=29229) 2024-12-13T07:34:32,569 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742082_1258 (size=29229) 2024-12-13T07:34:32,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742082_1258 (size=29229) 2024-12-13T07:34:32,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742083_1259 (size=169089) 2024-12-13T07:34:32,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742083_1259 (size=169089) 2024-12-13T07:34:32,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742083_1259 (size=169089) 2024-12-13T07:34:32,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742084_1260 (size=5175431) 2024-12-13T07:34:32,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742084_1260 (size=5175431) 2024-12-13T07:34:32,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742084_1260 (size=5175431) 2024-12-13T07:34:32,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742085_1261 (size=136454) 2024-12-13T07:34:32,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742085_1261 (size=136454) 2024-12-13T07:34:32,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742085_1261 (size=136454) 2024-12-13T07:34:32,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742086_1262 (size=907854) 2024-12-13T07:34:32,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742086_1262 (size=907854) 2024-12-13T07:34:32,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742086_1262 (size=907854) 2024-12-13T07:34:32,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742087_1263 (size=3317408) 2024-12-13T07:34:32,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742087_1263 (size=3317408) 2024-12-13T07:34:32,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742087_1263 (size=3317408) 2024-12-13T07:34:32,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742088_1264 (size=451756) 2024-12-13T07:34:32,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742088_1264 (size=451756) 2024-12-13T07:34:32,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742088_1264 (size=451756) 2024-12-13T07:34:32,647 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742089_1265 (size=503880) 2024-12-13T07:34:32,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742089_1265 (size=503880) 2024-12-13T07:34:32,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742089_1265 (size=503880) 2024-12-13T07:34:32,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742090_1266 (size=4695811) 2024-12-13T07:34:32,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742090_1266 (size=4695811) 2024-12-13T07:34:32,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742090_1266 (size=4695811) 2024-12-13T07:34:32,663 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-13T07:34:32,665 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-13T07:34:32,667 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T07:34:32,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742091_1267 (size=338) 2024-12-13T07:34:32,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742091_1267 (size=338) 2024-12-13T07:34:32,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742091_1267 (size=338) 2024-12-13T07:34:32,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742092_1268 (size=15) 2024-12-13T07:34:32,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742092_1268 (size=15) 2024-12-13T07:34:32,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742092_1268 (size=15) 2024-12-13T07:34:32,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742093_1269 (size=304932) 2024-12-13T07:34:32,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742093_1269 (size=304932) 2024-12-13T07:34:32,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742093_1269 (size=304932) 2024-12-13T07:34:35,169 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T07:34:35,170 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:34:35,174 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0004_000001 (auth:SIMPLE) from 127.0.0.1:60550 2024-12-13T07:34:35,184 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0004/container_1734075189398_0004_01_000001/launch_container.sh] 2024-12-13T07:34:35,184 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0004/container_1734075189398_0004_01_000001/container_tokens] 2024-12-13T07:34:35,184 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0004/container_1734075189398_0004_01_000001/sysfs] 2024-12-13T07:34:36,049 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0005_000001 (auth:SIMPLE) from 127.0.0.1:56292 2024-12-13T07:34:41,074 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0005_000001 (auth:SIMPLE) from 127.0.0.1:53818 2024-12-13T07:34:41,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742094_1270 (size=350606) 2024-12-13T07:34:41,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742094_1270 (size=350606) 2024-12-13T07:34:41,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742094_1270 (size=350606) 2024-12-13T07:34:43,389 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0005_000001 (auth:SIMPLE) from 127.0.0.1:54384 2024-12-13T07:34:47,422 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0005/container_1734075189398_0005_01_000002/sysfs] 2024-12-13T07:34:48,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742095_1271 (size=16913) 2024-12-13T07:34:48,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742095_1271 (size=16913) 2024-12-13T07:34:48,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742095_1271 (size=16913) 2024-12-13T07:34:48,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742096_1272 (size=462) 2024-12-13T07:34:48,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742096_1272 (size=462) 2024-12-13T07:34:48,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742096_1272 (size=462) 2024-12-13T07:34:48,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742097_1273 (size=16913) 2024-12-13T07:34:48,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742097_1273 (size=16913) 2024-12-13T07:34:48,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742097_1273 (size=16913) 2024-12-13T07:34:48,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742098_1274 (size=350606) 2024-12-13T07:34:48,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742098_1274 (size=350606) 2024-12-13T07:34:48,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742098_1274 (size=350606) 2024-12-13T07:34:48,186 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0005_000001 (auth:SIMPLE) from 127.0.0.1:54400 2024-12-13T07:34:49,885 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T07:34:49,885 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-13T07:34:49,888 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-13T07:34:49,888 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T07:34:49,889 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T07:34:49,889 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-13T07:34:49,891 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-13T07:34:49,891 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-13T07:34:49,891 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@576f0795 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-13T07:34:49,891 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-13T07:34:49,891 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075255191/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-13T07:34:49,908 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testConsecutiveExports 2024-12-13T07:34:49,908 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-13T07:34:49,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-13T07:34:49,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-13T07:34:49,913 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075289913"}]},"ts":"1734075289913"} 2024-12-13T07:34:49,915 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated 
tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-13T07:34:49,936 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-13T07:34:49,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-13T07:34:49,940 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6a1d6d458cf0eb73804984414865f1a, UNASSIGN}, {pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afb548b9703253e86c3615e776d3eca7, UNASSIGN}] 2024-12-13T07:34:49,942 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afb548b9703253e86c3615e776d3eca7, UNASSIGN 2024-12-13T07:34:49,942 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6a1d6d458cf0eb73804984414865f1a, UNASSIGN 2024-12-13T07:34:49,943 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=afb548b9703253e86c3615e776d3eca7, regionState=CLOSING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:49,943 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=f6a1d6d458cf0eb73804984414865f1a, regionState=CLOSING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:49,945 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:34:49,945 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=94, state=RUNNABLE; CloseRegionProcedure f6a1d6d458cf0eb73804984414865f1a, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:34:49,946 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:34:49,946 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=95, state=RUNNABLE; CloseRegionProcedure afb548b9703253e86c3615e776d3eca7, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:34:50,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-13T07:34:50,097 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:50,098 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:50,098 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:34:50,098 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] 
regionserver.HRegion(1681): Closing f6a1d6d458cf0eb73804984414865f1a, disabling compactions & flushes 2024-12-13T07:34:50,098 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:50,098 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:50,098 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:50,098 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. after waiting 0 ms 2024-12-13T07:34:50,098 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:50,098 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(124): Close afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:50,099 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:34:50,099 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1681): Closing afb548b9703253e86c3615e776d3eca7, disabling compactions & flushes 2024-12-13T07:34:50,099 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 2024-12-13T07:34:50,099 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 2024-12-13T07:34:50,099 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. after waiting 0 ms 2024-12-13T07:34:50,099 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 
2024-12-13T07:34:50,103 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:34:50,103 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:34:50,103 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:34:50,103 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:34:50,103 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7. 2024-12-13T07:34:50,103 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a. 2024-12-13T07:34:50,103 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1635): Region close journal for afb548b9703253e86c3615e776d3eca7: 2024-12-13T07:34:50,103 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for f6a1d6d458cf0eb73804984414865f1a: 2024-12-13T07:34:50,105 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(170): Closed afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:50,105 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=afb548b9703253e86c3615e776d3eca7, regionState=CLOSED 2024-12-13T07:34:50,105 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:50,106 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=f6a1d6d458cf0eb73804984414865f1a, regionState=CLOSED 2024-12-13T07:34:50,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=95 2024-12-13T07:34:50,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=94 2024-12-13T07:34:50,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=95, state=SUCCESS; CloseRegionProcedure afb548b9703253e86c3615e776d3eca7, server=9eb75de94ef3,36021,1734075182342 in 160 msec 2024-12-13T07:34:50,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=94, state=SUCCESS; CloseRegionProcedure f6a1d6d458cf0eb73804984414865f1a, server=9eb75de94ef3,41895,1734075182430 in 162 msec 2024-12-13T07:34:50,110 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=afb548b9703253e86c3615e776d3eca7, UNASSIGN in 169 msec 2024-12-13T07:34:50,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-13T07:34:50,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f6a1d6d458cf0eb73804984414865f1a, UNASSIGN in 170 msec 2024-12-13T07:34:50,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-13T07:34:50,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 174 msec 2024-12-13T07:34:50,114 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075290113"}]},"ts":"1734075290113"} 2024-12-13T07:34:50,115 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-13T07:34:50,123 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-13T07:34:50,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; DisableTableProcedure table=testtb-testConsecutiveExports in 215 msec 2024-12-13T07:34:50,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-13T07:34:50,217 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports, procId: 92 completed 2024-12-13T07:34:50,217 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-13T07:34:50,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T07:34:50,219 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T07:34:50,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-13T07:34:50,220 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=98, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T07:34:50,221 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-13T07:34:50,223 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:50,223 
DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:50,224 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/recovered.edits] 2024-12-13T07:34:50,224 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/recovered.edits] 2024-12-13T07:34:50,228 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/cf/e23efac893cd487f80b88a05f6d8112c to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/cf/e23efac893cd487f80b88a05f6d8112c 2024-12-13T07:34:50,228 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/cf/3dd27ff2bc204e298ac5eb469b5ad8b9 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/cf/3dd27ff2bc204e298ac5eb469b5ad8b9 2024-12-13T07:34:50,231 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7/recovered.edits/9.seqid 2024-12-13T07:34:50,231 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a/recovered.edits/9.seqid 2024-12-13T07:34:50,232 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/afb548b9703253e86c3615e776d3eca7 2024-12-13T07:34:50,232 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testConsecutiveExports/f6a1d6d458cf0eb73804984414865f1a 2024-12-13T07:34:50,232 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-13T07:34:50,234 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=98, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T07:34:50,236 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-13T07:34:50,238 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-13T07:34:50,239 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=98, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T07:34:50,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T07:34:50,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T07:34:50,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T07:34:50,239 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testConsecutiveExports' from region states. 
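The HFileArchiver entries above show that DeleteTableProcedure does not remove store files outright: each region's cf/ and recovered.edits/ contents are moved from data/default/<table>/<region> to the parallel archive/data/default/<table>/<region> path before the region directory is deleted. A hedged sketch of how one might verify that with the Hadoop FileSystem API, using the archive path taken from this log (adjust the root directory for your own cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedFilesSketch {
  public static void main(String[] args) throws Exception {
    // Archive location as reported by HFileArchiver in the log above.
    Path archived = new Path("hdfs://localhost:41477/user/jenkins/test-data/"
        + "e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testConsecutiveExports");
    FileSystem fs = archived.getFileSystem(new Configuration());
    // One entry per archived region directory; descend with fs.listFiles(archived, true)
    // to see the individual store files and recovered.edits.
    for (FileStatus status : fs.listStatus(archived)) {
      System.out.println(status.getPath());
    }
  }
}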
2024-12-13T07:34:50,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T07:34:50,240 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075290239"}]},"ts":"9223372036854775807"} 2024-12-13T07:34:50,240 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075290239"}]},"ts":"9223372036854775807"} 2024-12-13T07:34:50,240 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-13T07:34:50,240 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-13T07:34:50,240 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-13T07:34:50,240 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-13T07:34:50,242 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T07:34:50,242 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => f6a1d6d458cf0eb73804984414865f1a, NAME => 'testtb-testConsecutiveExports,,1734075251802.f6a1d6d458cf0eb73804984414865f1a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => afb548b9703253e86c3615e776d3eca7, NAME => 'testtb-testConsecutiveExports,1,1734075251802.afb548b9703253e86c3615e776d3eca7.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T07:34:50,242 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testConsecutiveExports' as deleted. 
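Client-side, the teardown recorded here is simply a delete-table call followed by snapshot cleanup (the snapshot deletions appear in the entries that follow). A minimal sketch, assuming the table has already been disabled as above:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // DeleteTableProcedure (pid=98 in the log) requires the table to be disabled first.
      admin.deleteTable(TableName.valueOf("testtb-testConsecutiveExports"));
      // Drop the snapshots taken earlier in the test, as the following entries show.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}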
2024-12-13T07:34:50,243 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734075290242"}]},"ts":"9223372036854775807"} 2024-12-13T07:34:50,245 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testConsecutiveExports state from META 2024-12-13T07:34:50,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T07:34:50,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T07:34:50,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T07:34:50,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:50,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:50,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:50,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T07:34:50,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:50,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-13T07:34:50,257 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=98, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T07:34:50,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; DeleteTableProcedure table=testtb-testConsecutiveExports in 40 msec 2024-12-13T07:34:50,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-13T07:34:50,350 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports, procId: 98 completed 2024-12-13T07:34:50,358 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] 
master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" 2024-12-13T07:34:50,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-13T07:34:50,362 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" 2024-12-13T07:34:50,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-13T07:34:50,386 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=800 (was 792) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43841 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38643 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1997580727_1 at /127.0.0.1:48382 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1997580727_1 at /127.0.0.1:34204 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:35532 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: hconnection-0x6218feab-shared-pool-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1658146201) connection to localhost/127.0.0.1:43841 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-4014 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46277 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:59472 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 64500) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=807 (was 817), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=544 (was 589), ProcessCount=17 (was 20), AvailableMemoryMB=6565 (was 6856) 2024-12-13T07:34:50,387 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-13T07:34:50,402 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=800, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=544, ProcessCount=17, AvailableMemoryMB=6565 2024-12-13T07:34:50,402 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-13T07:34:50,403 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:34:50,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:50,406 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:34:50,406 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:50,406 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 99 2024-12-13T07:34:50,407 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:34:50,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-13T07:34:50,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742099_1275 (size=422) 2024-12-13T07:34:50,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742099_1275 (size=422) 2024-12-13T07:34:50,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742099_1275 (size=422) 2024-12-13T07:34:50,415 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2a8e991f17b65aad4b88bb288fe67388, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388.', STARTKEY => '', ENDKEY => '1'}, 
tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:34:50,415 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 8048339c00f55b04bd8357c5a420a285, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:34:50,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742101_1277 (size=83) 2024-12-13T07:34:50,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742100_1276 (size=83) 2024-12-13T07:34:50,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742101_1277 (size=83) 2024-12-13T07:34:50,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742101_1277 (size=83) 2024-12-13T07:34:50,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742100_1276 (size=83) 2024-12-13T07:34:50,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742100_1276 (size=83) 2024-12-13T07:34:50,426 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:50,426 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:50,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1681): Closing 8048339c00f55b04bd8357c5a420a285, disabling compactions & flushes 2024-12-13T07:34:50,427 DEBUG 
[RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1681): Closing 2a8e991f17b65aad4b88bb288fe67388, disabling compactions & flushes 2024-12-13T07:34:50,427 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:34:50,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:34:50,427 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:34:50,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. after waiting 0 ms 2024-12-13T07:34:50,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:34:50,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:34:50,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. after waiting 0 ms 2024-12-13T07:34:50,427 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:34:50,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:34:50,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1635): Region close journal for 8048339c00f55b04bd8357c5a420a285: 2024-12-13T07:34:50,427 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 
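The CreateTableProcedure (pid=99) above writes the filesystem layout for two regions split at row key '1'. The equivalent client-side call, roughly reconstructed from the table descriptor printed in the log (a single 'cf' family with one version; all other attributes left at their defaults):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSplitTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // One split key ("1") yields the two regions seen in the log:
      // [ '', '1' ) and [ '1', '' ).
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}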
2024-12-13T07:34:50,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2a8e991f17b65aad4b88bb288fe67388: 2024-12-13T07:34:50,428 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:34:50,428 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734075290428"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075290428"}]},"ts":"1734075290428"} 2024-12-13T07:34:50,428 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734075290428"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075290428"}]},"ts":"1734075290428"} 2024-12-13T07:34:50,430 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T07:34:50,430 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:34:50,431 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075290431"}]},"ts":"1734075290431"} 2024-12-13T07:34:50,432 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-13T07:34:50,448 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:34:50,449 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:34:50,449 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:34:50,449 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:34:50,449 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:34:50,449 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:34:50,449 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:34:50,449 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:34:50,450 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2a8e991f17b65aad4b88bb288fe67388, ASSIGN}, {pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8048339c00f55b04bd8357c5a420a285, ASSIGN}] 2024-12-13T07:34:50,451 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): 
Took xlock for pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2a8e991f17b65aad4b88bb288fe67388, ASSIGN 2024-12-13T07:34:50,451 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8048339c00f55b04bd8357c5a420a285, ASSIGN 2024-12-13T07:34:50,452 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8048339c00f55b04bd8357c5a420a285, ASSIGN; state=OFFLINE, location=9eb75de94ef3,41895,1734075182430; forceNewPlan=false, retain=false 2024-12-13T07:34:50,452 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2a8e991f17b65aad4b88bb288fe67388, ASSIGN; state=OFFLINE, location=9eb75de94ef3,36021,1734075182342; forceNewPlan=false, retain=false 2024-12-13T07:34:50,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-13T07:34:50,602 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:34:50,602 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=8048339c00f55b04bd8357c5a420a285, regionState=OPENING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:50,602 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=2a8e991f17b65aad4b88bb288fe67388, regionState=OPENING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:50,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=100, state=RUNNABLE; OpenRegionProcedure 2a8e991f17b65aad4b88bb288fe67388, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:34:50,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure 8048339c00f55b04bd8357c5a420a285, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:34:50,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-13T07:34:50,756 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:50,757 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:50,759 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:34:50,759 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 
2024-12-13T07:34:50,759 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 8048339c00f55b04bd8357c5a420a285, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T07:34:50,759 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => 2a8e991f17b65aad4b88bb288fe67388, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T07:34:50,760 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. service=AccessControlService 2024-12-13T07:34:50,760 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. service=AccessControlService 2024-12-13T07:34:50,760 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:34:50,760 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T07:34:50,760 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:50,760 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:50,760 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:50,760 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:50,760 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:50,760 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:50,760 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:50,760 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:50,762 INFO [StoreOpener-8048339c00f55b04bd8357c5a420a285-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:50,762 INFO [StoreOpener-2a8e991f17b65aad4b88bb288fe67388-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:50,763 INFO [StoreOpener-8048339c00f55b04bd8357c5a420a285-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8048339c00f55b04bd8357c5a420a285 columnFamilyName cf 2024-12-13T07:34:50,763 INFO [StoreOpener-2a8e991f17b65aad4b88bb288fe67388-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a8e991f17b65aad4b88bb288fe67388 columnFamilyName cf 2024-12-13T07:34:50,763 DEBUG [StoreOpener-2a8e991f17b65aad4b88bb288fe67388-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:50,763 DEBUG [StoreOpener-8048339c00f55b04bd8357c5a420a285-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:50,764 INFO [StoreOpener-2a8e991f17b65aad4b88bb288fe67388-1 {}] regionserver.HStore(327): Store=2a8e991f17b65aad4b88bb288fe67388/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:34:50,764 INFO [StoreOpener-8048339c00f55b04bd8357c5a420a285-1 {}] regionserver.HStore(327): Store=8048339c00f55b04bd8357c5a420a285/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:34:50,765 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:50,765 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:50,765 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:50,765 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:50,767 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, 
pid=102}] regionserver.HRegion(1085): writing seq id for 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:50,767 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:50,769 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:34:50,769 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:34:50,769 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 8048339c00f55b04bd8357c5a420a285; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72736267, jitterRate=0.08385483920574188}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:34:50,770 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened 2a8e991f17b65aad4b88bb288fe67388; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68819398, jitterRate=0.025488942861557007}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:34:50,770 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 8048339c00f55b04bd8357c5a420a285: 2024-12-13T07:34:50,770 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for 2a8e991f17b65aad4b88bb288fe67388: 2024-12-13T07:34:50,771 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388., pid=102, masterSystemTime=1734075290756 2024-12-13T07:34:50,771 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285., pid=103, masterSystemTime=1734075290757 2024-12-13T07:34:50,772 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:34:50,772 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 
2024-12-13T07:34:50,772 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=2a8e991f17b65aad4b88bb288fe67388, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:50,772 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:34:50,772 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:34:50,773 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=8048339c00f55b04bd8357c5a420a285, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:50,775 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=100 2024-12-13T07:34:50,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=100, state=SUCCESS; OpenRegionProcedure 2a8e991f17b65aad4b88bb288fe67388, server=9eb75de94ef3,36021,1734075182342 in 170 msec 2024-12-13T07:34:50,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-12-13T07:34:50,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure 8048339c00f55b04bd8357c5a420a285, server=9eb75de94ef3,41895,1734075182430 in 169 msec 2024-12-13T07:34:50,776 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2a8e991f17b65aad4b88bb288fe67388, ASSIGN in 325 msec 2024-12-13T07:34:50,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-12-13T07:34:50,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8048339c00f55b04bd8357c5a420a285, ASSIGN in 326 msec 2024-12-13T07:34:50,778 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:34:50,778 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075290778"}]},"ts":"1734075290778"} 2024-12-13T07:34:50,779 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-13T07:34:50,849 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:34:50,849 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 
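
The sequence above — CreateTableProcedure pid=99 stepping through CREATE_TABLE_ADD_TO_META and CREATE_TABLE_ASSIGN_REGIONS, the two TransitRegionStateProcedures (pid=100/101), and the regions opening on 9eb75de94ef3,36021 and 9eb75de94ef3,41895 — is the kind of trace a pre-split createTable call followed by an assignment wait typically produces. Below is a minimal sketch of such a client-side call, assuming an HBaseTestingUtility instance named util; the table name, column family 'cf' and split key '1' are taken from the log (STARTKEY/ENDKEY '1'), while the helper itself is hypothetical and not the test's actual code.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper; not the test's actual code.
static void createPreSplitTable(HBaseTestingUtility util) throws Exception {
  TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
  TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .build();
  try (Admin admin = util.getConnection().getAdmin()) {
    // A single split key "1" yields the two regions seen above: ['', '1') and ['1', '').
    admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
  }
  // Corresponds to the HBaseTestingUtility "regions ... assigned" wait logged below.
  util.waitUntilAllRegionsAssigned(tn);
}
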
2024-12-13T07:34:50,851 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-13T07:34:50,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:50,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:50,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:50,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:50,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 466 msec 2024-12-13T07:34:51,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-13T07:34:51,011 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 99 completed 2024-12-13T07:34:51,011 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. 
Timeout = 60000ms 2024-12-13T07:34:51,011 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:34:51,014 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-13T07:34:51,014 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:34:51,014 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-12-13T07:34:51,017 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-13T07:34:51,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075291017 (current time:1734075291017). 2024-12-13T07:34:51,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:34:51,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-13T07:34:51,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:34:51,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6c4c1681 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d0a98f0 2024-12-13T07:34:51,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5300e2f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:34:51,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:51,031 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35662, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:51,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6c4c1681 to 127.0.0.1:59810 2024-12-13T07:34:51,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:34:51,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x43ddf044 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a392dd7 2024-12-13T07:34:51,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e7a6534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:34:51,050 DEBUG [hconnection-0x39ad5d2a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:51,051 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35668, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:51,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:51,054 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54428, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:51,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x43ddf044 to 127.0.0.1:59810 2024-12-13T07:34:51,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:34:51,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-13T07:34:51,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:34:51,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-13T07:34:51,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-13T07:34:51,057 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:34:51,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-13T07:34:51,058 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:34:51,060 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:34:51,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742102_1278 (size=215) 2024-12-13T07:34:51,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742102_1278 (size=215) 2024-12-13T07:34:51,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742102_1278 (size=215) 2024-12-13T07:34:51,067 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:34:51,067 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 2a8e991f17b65aad4b88bb288fe67388}, {pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 8048339c00f55b04bd8357c5a420a285}] 2024-12-13T07:34:51,068 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:51,068 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:51,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-13T07:34:51,219 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:51,219 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:51,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-13T07:34:51,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 
2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2538): Flush status journal for 8048339c00f55b04bd8357c5a420a285: 2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 2a8e991f17b65aad4b88bb288fe67388: 2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:34:51,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:34:51,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742104_1280 (size=86) 2024-12-13T07:34:51,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742103_1279 (size=86) 2024-12-13T07:34:51,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742104_1280 (size=86) 2024-12-13T07:34:51,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742103_1279 (size=86) 2024-12-13T07:34:51,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742103_1279 (size=86) 2024-12-13T07:34:51,227 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742104_1280 (size=86) 2024-12-13T07:34:51,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:34:51,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:34:51,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-13T07:34:51,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-13T07:34:51,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=106 2024-12-13T07:34:51,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-13T07:34:51,228 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:51,228 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:51,228 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:51,228 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:51,229 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; SnapshotRegionProcedure 2a8e991f17b65aad4b88bb288fe67388 in 161 msec 2024-12-13T07:34:51,230 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=104 2024-12-13T07:34:51,230 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=104, state=SUCCESS; SnapshotRegionProcedure 8048339c00f55b04bd8357c5a420a285 in 161 msec 2024-12-13T07:34:51,230 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:34:51,230 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion 
table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:34:51,231 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:34:51,231 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,232 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742105_1281 (size=597) 2024-12-13T07:34:51,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742105_1281 (size=597) 2024-12-13T07:34:51,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742105_1281 (size=597) 2024-12-13T07:34:51,242 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:34:51,247 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:34:51,247 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,248 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:34:51,249 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-13T07:34:51,250 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 193 msec 2024-12-13T07:34:51,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-13T07:34:51,360 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 104 completed 2024-12-13T07:34:51,369 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:34:51,369 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41895 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:34:51,374 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,374 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:34:51,374 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:34:51,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-13T07:34:51,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075291390 (current time:1734075291390). 
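
Both snapshot requests in this excerpt ({ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion } handled as pid=104 above, and { ss=snaptb0-testExportFileSystemStateWithMergeRegion } starting as pid=107 below) are FLUSH-type snapshots submitted through the master's snapshot RPC. Below is a minimal sketch of the Admin call that issues such a request, assuming a connected Admin instance named admin; the snapshot and table names come from the log, the helper is hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

// Hypothetical helper; not the test's actual code.
static void takeFlushSnapshot(Admin admin, String snapshotName) throws Exception {
  TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
  // Blocks until the master reports the SnapshotProcedure (cf. pid=104/107) as done.
  admin.snapshot(snapshotName, tn, SnapshotType.FLUSH);
}
// e.g. takeFlushSnapshot(admin, "emptySnaptb0-testExportFileSystemStateWithMergeRegion");
//      takeFlushSnapshot(admin, "snaptb0-testExportFileSystemStateWithMergeRegion");
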
2024-12-13T07:34:51,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:34:51,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-13T07:34:51,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:34:51,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1deb5d74 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@610cb425 2024-12-13T07:34:51,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d4ba0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:34:51,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:51,405 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35676, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:51,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1deb5d74 to 127.0.0.1:59810 2024-12-13T07:34:51,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:34:51,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31a28d57 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@350bbada 2024-12-13T07:34:51,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59b7a790, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:34:51,427 DEBUG [hconnection-0x4ffc7ca-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:51,428 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35680, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:51,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:51,431 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54440, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:51,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x31a28d57 to 127.0.0.1:59810 2024-12-13T07:34:51,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:34:51,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-13T07:34:51,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:34:51,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-13T07:34:51,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-13T07:34:51,435 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:34:51,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-13T07:34:51,435 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:34:51,437 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:34:51,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742106_1282 (size=210) 2024-12-13T07:34:51,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742106_1282 (size=210) 2024-12-13T07:34:51,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742106_1282 (size=210) 2024-12-13T07:34:51,445 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:34:51,445 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 2a8e991f17b65aad4b88bb288fe67388}, {pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 8048339c00f55b04bd8357c5a420a285}] 2024-12-13T07:34:51,446 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:51,446 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:51,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-13T07:34:51,597 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:34:51,597 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:51,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=109 2024-12-13T07:34:51,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=108 2024-12-13T07:34:51,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:34:51,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 
2024-12-13T07:34:51,600 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 8048339c00f55b04bd8357c5a420a285 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-13T07:34:51,600 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2837): Flushing 2a8e991f17b65aad4b88bb288fe67388 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-13T07:34:51,620 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/.tmp/cf/1041b75041e848b89d349da1244cade3 is 71, key is 02ef9c7823e0ba893058dfa7871c020f/cf:q/1734075291368/Put/seqid=0 2024-12-13T07:34:51,620 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/.tmp/cf/5144c7510a22492a85638ec3fe19a16f is 71, key is 26b3ff0f8152f9bef15482d518b0deb9/cf:q/1734075291369/Put/seqid=0 2024-12-13T07:34:51,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742107_1283 (size=5354) 2024-12-13T07:34:51,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742107_1283 (size=5354) 2024-12-13T07:34:51,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742107_1283 (size=5354) 2024-12-13T07:34:51,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742108_1284 (size=8258) 2024-12-13T07:34:51,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742108_1284 (size=8258) 2024-12-13T07:34:51,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742108_1284 (size=8258) 2024-12-13T07:34:51,628 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/.tmp/cf/1041b75041e848b89d349da1244cade3 2024-12-13T07:34:51,629 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/.tmp/cf/5144c7510a22492a85638ec3fe19a16f 2024-12-13T07:34:51,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/.tmp/cf/1041b75041e848b89d349da1244cade3 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/cf/1041b75041e848b89d349da1244cade3 2024-12-13T07:34:51,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/.tmp/cf/5144c7510a22492a85638ec3fe19a16f as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/cf/5144c7510a22492a85638ec3fe19a16f 2024-12-13T07:34:51,638 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/cf/5144c7510a22492a85638ec3fe19a16f, entries=46, sequenceid=6, filesize=8.1 K 2024-12-13T07:34:51,638 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/cf/1041b75041e848b89d349da1244cade3, entries=4, sequenceid=6, filesize=5.2 K 2024-12-13T07:34:51,638 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 8048339c00f55b04bd8357c5a420a285 in 38ms, sequenceid=6, compaction requested=false 2024-12-13T07:34:51,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-13T07:34:51,639 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 2a8e991f17b65aad4b88bb288fe67388 in 39ms, sequenceid=6, compaction requested=false 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2538): Flush status journal for 2a8e991f17b65aad4b88bb288fe67388: 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 8048339c00f55b04bd8357c5a420a285: 
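
The earlier "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings (HRegion(8254)) are what HBase emits when puts arrive with durability SKIP_WAL; the flushes just logged (memstore data written to cf/1041b75041e848b89d349da1244cade3 and cf/5144c7510a22492a85638ec3fe19a16f at sequenceid=6) are then driven by the FLUSH-type SnapshotProcedure itself. Below is a minimal sketch of such a put, assuming an open Connection named conn; the family 'cf' and qualifier 'q' come from the key dumps above, while the row, value and helper are placeholders, not the test's actual code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper; not the test's actual code.
static void putWithoutWal(Connection conn, byte[] row) throws Exception {
  TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
  try (Table table = conn.getTable(tn)) {
    Put p = new Put(row);
    p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value")); // "value" is a placeholder
    p.setDurability(Durability.SKIP_WAL); // triggers the HRegion(8254) "WAL disabled" warning
    table.put(p);
  }
}
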
2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/cf/1041b75041e848b89d349da1244cade3] hfiles 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/cf/5144c7510a22492a85638ec3fe19a16f] hfiles 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/cf/1041b75041e848b89d349da1244cade3 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/cf/5144c7510a22492a85638ec3fe19a16f for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,652 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742109_1285 (size=125) 2024-12-13T07:34:51,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742109_1285 (size=125) 2024-12-13T07:34:51,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742110_1286 (size=125) 2024-12-13T07:34:51,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742110_1286 (size=125) 2024-12-13T07:34:51,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742109_1285 (size=125) 2024-12-13T07:34:51,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742110_1286 (size=125) 2024-12-13T07:34:51,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:34:51,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:34:51,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-13T07:34:51,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=108 2024-12-13T07:34:51,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=108 2024-12-13T07:34:51,654 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:51,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-13T07:34:51,654 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:51,654 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:34:51,654 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:34:51,655 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=107, state=SUCCESS; SnapshotRegionProcedure 8048339c00f55b04bd8357c5a420a285 in 209 msec 2024-12-13T07:34:51,656 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=108, resume processing ppid=107 2024-12-13T07:34:51,656 
INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; SnapshotRegionProcedure 2a8e991f17b65aad4b88bb288fe67388 in 209 msec 2024-12-13T07:34:51,656 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:34:51,656 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:34:51,657 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:34:51,657 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,658 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742111_1287 (size=675) 2024-12-13T07:34:51,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742111_1287 (size=675) 2024-12-13T07:34:51,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742111_1287 (size=675) 2024-12-13T07:34:51,667 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:34:51,672 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:34:51,672 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:51,673 INFO 
[PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:34:51,673 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-13T07:34:51,675 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 241 msec 2024-12-13T07:34:51,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-13T07:34:51,739 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 107 completed 2024-12-13T07:34:51,761 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T07:34:51,763 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43172, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T07:34:51,764 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-13T07:34:51,765 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T07:34:51,767 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54452, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T07:34:51,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41895 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-13T07:34:51,767 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T07:34:51,769 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35692, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T07:34:51,769 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42189 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-13T07:34:51,771 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:34:51,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:34:51,772 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:34:51,772 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:51,772 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 110 2024-12-13T07:34:51,773 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:34:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T07:34:51,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742112_1288 (size=399) 2024-12-13T07:34:51,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742112_1288 (size=399) 2024-12-13T07:34:51,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742112_1288 (size=399) 2024-12-13T07:34:51,781 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ad255285098bb47b349dd9eae190ab75, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:34:51,782 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 9621f34eb1025da4db4761db2e5003d8, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:34:51,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742113_1289 (size=85) 2024-12-13T07:34:51,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742113_1289 (size=85) 2024-12-13T07:34:51,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742114_1290 (size=85) 2024-12-13T07:34:51,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742113_1289 (size=85) 2024-12-13T07:34:51,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742114_1290 (size=85) 2024-12-13T07:34:51,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742114_1290 (size=85) 2024-12-13T07:34:51,788 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:51,788 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1681): Closing ad255285098bb47b349dd9eae190ab75, disabling compactions & flushes 2024-12-13T07:34:51,788 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. 2024-12-13T07:34:51,789 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. 2024-12-13T07:34:51,789 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. after waiting 0 ms 2024-12-13T07:34:51,789 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. 2024-12-13T07:34:51,789 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. 
2024-12-13T07:34:51,789 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1635): Region close journal for ad255285098bb47b349dd9eae190ab75: 2024-12-13T07:34:51,789 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:51,789 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1681): Closing 9621f34eb1025da4db4761db2e5003d8, disabling compactions & flushes 2024-12-13T07:34:51,789 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. 2024-12-13T07:34:51,789 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. 2024-12-13T07:34:51,789 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. after waiting 0 ms 2024-12-13T07:34:51,789 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. 2024-12-13T07:34:51,789 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. 2024-12-13T07:34:51,789 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1635): Region close journal for 9621f34eb1025da4db4761db2e5003d8: 2024-12-13T07:34:51,790 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:34:51,791 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734075291791"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075291791"}]},"ts":"1734075291791"} 2024-12-13T07:34:51,791 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734075291791"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075291791"}]},"ts":"1734075291791"} 2024-12-13T07:34:51,793 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
2024-12-13T07:34:51,794 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:34:51,794 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075291794"}]},"ts":"1734075291794"} 2024-12-13T07:34:51,795 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-13T07:34:51,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T07:34:51,881 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:34:51,882 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:34:51,882 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:34:51,882 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:34:51,882 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:34:51,882 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:34:51,882 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:34:51,882 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:34:51,883 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ad255285098bb47b349dd9eae190ab75, ASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9621f34eb1025da4db4761db2e5003d8, ASSIGN}] 2024-12-13T07:34:51,884 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9621f34eb1025da4db4761db2e5003d8, ASSIGN 2024-12-13T07:34:51,884 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ad255285098bb47b349dd9eae190ab75, ASSIGN 2024-12-13T07:34:51,884 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9621f34eb1025da4db4761db2e5003d8, ASSIGN; state=OFFLINE, location=9eb75de94ef3,41895,1734075182430; forceNewPlan=false, retain=false 2024-12-13T07:34:51,884 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; 
TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ad255285098bb47b349dd9eae190ab75, ASSIGN; state=OFFLINE, location=9eb75de94ef3,42189,1734075182526; forceNewPlan=false, retain=false 2024-12-13T07:34:52,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:34:52,021 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-13T07:34:52,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-13T07:34:52,035 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:34:52,035 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=9621f34eb1025da4db4761db2e5003d8, regionState=OPENING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:52,035 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=ad255285098bb47b349dd9eae190ab75, regionState=OPENING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:34:52,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; OpenRegionProcedure 9621f34eb1025da4db4761db2e5003d8, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:34:52,037 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=111, state=RUNNABLE; OpenRegionProcedure ad255285098bb47b349dd9eae190ab75, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:34:52,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T07:34:52,188 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:52,188 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:34:52,191 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. 2024-12-13T07:34:52,191 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => 9621f34eb1025da4db4761db2e5003d8, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8.', STARTKEY => '2', ENDKEY => ''} 2024-12-13T07:34:52,192 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. 
2024-12-13T07:34:52,192 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7285): Opening region: {ENCODED => ad255285098bb47b349dd9eae190ab75, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75.', STARTKEY => '', ENDKEY => '2'} 2024-12-13T07:34:52,192 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. service=AccessControlService 2024-12-13T07:34:52,192 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. service=AccessControlService 2024-12-13T07:34:52,192 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:34:52,192 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:34:52,192 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:34:52,192 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:52,192 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for 9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:34:52,192 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 ad255285098bb47b349dd9eae190ab75 2024-12-13T07:34:52,192 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for 9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:34:52,192 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:52,192 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7327): checking encryption for ad255285098bb47b349dd9eae190ab75 2024-12-13T07:34:52,192 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] 
regionserver.HRegion(7330): checking classloading for ad255285098bb47b349dd9eae190ab75 2024-12-13T07:34:52,194 INFO [StoreOpener-9621f34eb1025da4db4761db2e5003d8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:34:52,194 INFO [StoreOpener-ad255285098bb47b349dd9eae190ab75-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ad255285098bb47b349dd9eae190ab75 2024-12-13T07:34:52,196 INFO [StoreOpener-9621f34eb1025da4db4761db2e5003d8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9621f34eb1025da4db4761db2e5003d8 columnFamilyName cf 2024-12-13T07:34:52,196 INFO [StoreOpener-ad255285098bb47b349dd9eae190ab75-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ad255285098bb47b349dd9eae190ab75 columnFamilyName cf 2024-12-13T07:34:52,196 DEBUG [StoreOpener-9621f34eb1025da4db4761db2e5003d8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:52,196 DEBUG [StoreOpener-ad255285098bb47b349dd9eae190ab75-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:52,196 INFO [StoreOpener-ad255285098bb47b349dd9eae190ab75-1 {}] regionserver.HStore(327): Store=ad255285098bb47b349dd9eae190ab75/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:34:52,196 INFO [StoreOpener-9621f34eb1025da4db4761db2e5003d8-1 {}] regionserver.HStore(327): Store=9621f34eb1025da4db4761db2e5003d8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:34:52,197 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 
{event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75 2024-12-13T07:34:52,198 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:34:52,198 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75 2024-12-13T07:34:52,198 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:34:52,200 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1085): writing seq id for 9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:34:52,200 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1085): writing seq id for ad255285098bb47b349dd9eae190ab75 2024-12-13T07:34:52,202 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:34:52,202 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:34:52,202 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened 9621f34eb1025da4db4761db2e5003d8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73857271, jitterRate=0.10055910050868988}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:34:52,202 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1102): Opened ad255285098bb47b349dd9eae190ab75; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72870435, jitterRate=0.08585409820079803}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:34:52,203 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for 
9621f34eb1025da4db4761db2e5003d8: 2024-12-13T07:34:52,203 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1001): Region open journal for ad255285098bb47b349dd9eae190ab75: 2024-12-13T07:34:52,203 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8., pid=113, masterSystemTime=1734075292188 2024-12-13T07:34:52,203 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75., pid=114, masterSystemTime=1734075292188 2024-12-13T07:34:52,205 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. 2024-12-13T07:34:52,205 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. 2024-12-13T07:34:52,205 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=ad255285098bb47b349dd9eae190ab75, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:34:52,205 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. 2024-12-13T07:34:52,205 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. 
2024-12-13T07:34:52,205 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=9621f34eb1025da4db4761db2e5003d8, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:52,207 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=111 2024-12-13T07:34:52,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=111, state=SUCCESS; OpenRegionProcedure ad255285098bb47b349dd9eae190ab75, server=9eb75de94ef3,42189,1734075182526 in 169 msec 2024-12-13T07:34:52,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-13T07:34:52,208 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ad255285098bb47b349dd9eae190ab75, ASSIGN in 324 msec 2024-12-13T07:34:52,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; OpenRegionProcedure 9621f34eb1025da4db4761db2e5003d8, server=9eb75de94ef3,41895,1734075182430 in 171 msec 2024-12-13T07:34:52,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=110 2024-12-13T07:34:52,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9621f34eb1025da4db4761db2e5003d8, ASSIGN in 325 msec 2024-12-13T07:34:52,209 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:34:52,209 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075292209"}]},"ts":"1734075292209"} 2024-12-13T07:34:52,210 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-13T07:34:52,220 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:34:52,220 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-13T07:34:52,222 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-13T07:34:52,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:52,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:52,231 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:52,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:34:52,240 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:52,240 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:52,241 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:52,241 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:52,241 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:52,241 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:52,241 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:52,241 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-13T07:34:52,241 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 468 msec 2024-12-13T07:34:52,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T07:34:52,378 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 110 completed 2024-12-13T07:34:52,399 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$2(2219): Client=jenkins//172.17.0.2 merge regions [ad255285098bb47b349dd9eae190ab75, 9621f34eb1025da4db4761db2e5003d8] 2024-12-13T07:34:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[ad255285098bb47b349dd9eae190ab75, 9621f34eb1025da4db4761db2e5003d8], force=true 2024-12-13T07:34:52,404 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[ad255285098bb47b349dd9eae190ab75, 9621f34eb1025da4db4761db2e5003d8], force=true 2024-12-13T07:34:52,404 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[ad255285098bb47b349dd9eae190ab75, 9621f34eb1025da4db4761db2e5003d8], force=true 2024-12-13T07:34:52,404 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[ad255285098bb47b349dd9eae190ab75, 9621f34eb1025da4db4761db2e5003d8], force=true 2024-12-13T07:34:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-13T07:34:52,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ad255285098bb47b349dd9eae190ab75, UNASSIGN}, {pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9621f34eb1025da4db4761db2e5003d8, UNASSIGN}] 2024-12-13T07:34:52,416 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ad255285098bb47b349dd9eae190ab75, UNASSIGN 2024-12-13T07:34:52,416 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9621f34eb1025da4db4761db2e5003d8, UNASSIGN 2024-12-13T07:34:52,417 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=ad255285098bb47b349dd9eae190ab75, regionState=CLOSING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:34:52,417 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=9621f34eb1025da4db4761db2e5003d8, regionState=CLOSING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:52,418 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: 
evictOnSplit: true: evictOnClose: false 2024-12-13T07:34:52,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE; CloseRegionProcedure 9621f34eb1025da4db4761db2e5003d8, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:34:52,418 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-13T07:34:52,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=116, state=RUNNABLE; CloseRegionProcedure ad255285098bb47b349dd9eae190ab75, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:34:52,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-13T07:34:52,570 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:34:52,570 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:34:52,570 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(124): Close 9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:34:52,570 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close ad255285098bb47b349dd9eae190ab75 2024-12-13T07:34:52,570 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-13T07:34:52,570 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-13T07:34:52,570 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing ad255285098bb47b349dd9eae190ab75, disabling compactions & flushes 2024-12-13T07:34:52,570 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1681): Closing 9621f34eb1025da4db4761db2e5003d8, disabling compactions & flushes 2024-12-13T07:34:52,571 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. 2024-12-13T07:34:52,571 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. 2024-12-13T07:34:52,571 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. 2024-12-13T07:34:52,571 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. 
2024-12-13T07:34:52,571 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. after waiting 0 ms 2024-12-13T07:34:52,571 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. after waiting 0 ms 2024-12-13T07:34:52,571 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. 2024-12-13T07:34:52,571 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. 2024-12-13T07:34:52,571 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing ad255285098bb47b349dd9eae190ab75 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-13T07:34:52,571 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(2837): Flushing 9621f34eb1025da4db4761db2e5003d8 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-13T07:34:52,585 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/.tmp/cf/60b89b87ee48482a8212c102e9dbf69f is 28, key is 2/cf:/1734075292386/Put/seqid=0 2024-12-13T07:34:52,585 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/.tmp/cf/24e23b0f728f4780b814af881d6cdce9 is 28, key is 1/cf:/1734075292382/Put/seqid=0 2024-12-13T07:34:52,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742115_1291 (size=4945) 2024-12-13T07:34:52,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742116_1292 (size=4945) 2024-12-13T07:34:52,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742115_1291 (size=4945) 2024-12-13T07:34:52,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742116_1292 (size=4945) 2024-12-13T07:34:52,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742115_1291 (size=4945) 2024-12-13T07:34:52,592 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at 
sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/.tmp/cf/24e23b0f728f4780b814af881d6cdce9 2024-12-13T07:34:52,595 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/.tmp/cf/60b89b87ee48482a8212c102e9dbf69f 2024-12-13T07:34:52,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742116_1292 (size=4945) 2024-12-13T07:34:52,597 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/.tmp/cf/24e23b0f728f4780b814af881d6cdce9 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/cf/24e23b0f728f4780b814af881d6cdce9 2024-12-13T07:34:52,600 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/.tmp/cf/60b89b87ee48482a8212c102e9dbf69f as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/cf/60b89b87ee48482a8212c102e9dbf69f 2024-12-13T07:34:52,602 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/cf/24e23b0f728f4780b814af881d6cdce9, entries=1, sequenceid=5, filesize=4.8 K 2024-12-13T07:34:52,603 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for ad255285098bb47b349dd9eae190ab75 in 32ms, sequenceid=5, compaction requested=false 2024-12-13T07:34:52,603 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-13T07:34:52,606 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/cf/60b89b87ee48482a8212c102e9dbf69f, entries=1, sequenceid=5, filesize=4.8 K 2024-12-13T07:34:52,607 INFO 
[RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 9621f34eb1025da4db4761db2e5003d8 in 36ms, sequenceid=5, compaction requested=false 2024-12-13T07:34:52,607 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T07:34:52,608 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:34:52,608 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75. 2024-12-13T07:34:52,608 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for ad255285098bb47b349dd9eae190ab75: 2024-12-13T07:34:52,609 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed ad255285098bb47b349dd9eae190ab75 2024-12-13T07:34:52,610 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=ad255285098bb47b349dd9eae190ab75, regionState=CLOSED 2024-12-13T07:34:52,612 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=116 2024-12-13T07:34:52,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=116, state=SUCCESS; CloseRegionProcedure ad255285098bb47b349dd9eae190ab75, server=9eb75de94ef3,42189,1734075182526 in 192 msec 2024-12-13T07:34:52,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ad255285098bb47b349dd9eae190ab75, UNASSIGN in 197 msec 2024-12-13T07:34:52,615 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T07:34:52,615 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:34:52,615 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8. 
2024-12-13T07:34:52,615 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1635): Region close journal for 9621f34eb1025da4db4761db2e5003d8: 2024-12-13T07:34:52,617 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(170): Closed 9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:34:52,617 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=9621f34eb1025da4db4761db2e5003d8, regionState=CLOSED 2024-12-13T07:34:52,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=117 2024-12-13T07:34:52,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=117, state=SUCCESS; CloseRegionProcedure 9621f34eb1025da4db4761db2e5003d8, server=9eb75de94ef3,41895,1734075182430 in 200 msec 2024-12-13T07:34:52,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=115 2024-12-13T07:34:52,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9621f34eb1025da4db4761db2e5003d8, UNASSIGN in 205 msec 2024-12-13T07:34:52,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742117_1293 (size=84) 2024-12-13T07:34:52,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742117_1293 (size=84) 2024-12-13T07:34:52,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742117_1293 (size=84) 2024-12-13T07:34:52,647 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:52,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742118_1294 (size=20) 2024-12-13T07:34:52,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742118_1294 (size=20) 2024-12-13T07:34:52,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742118_1294 (size=20) 2024-12-13T07:34:52,659 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:52,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742119_1295 (size=21) 2024-12-13T07:34:52,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742119_1295 (size=21) 2024-12-13T07:34:52,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742119_1295 (size=21) 2024-12-13T07:34:52,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742120_1296 (size=84) 2024-12-13T07:34:52,672 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742120_1296 (size=84) 2024-12-13T07:34:52,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742120_1296 (size=84) 2024-12-13T07:34:52,673 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:52,685 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-13T07:34:52,687 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291770.ad255285098bb47b349dd9eae190ab75.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-13T07:34:52,687 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734075291770.9621f34eb1025da4db4761db2e5003d8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-13T07:34:52,687 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-13T07:34:52,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-13T07:34:52,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=339c036edb1d57cbb52ae3ff753d0f92, ASSIGN}] 2024-12-13T07:34:52,713 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=339c036edb1d57cbb52ae3ff753d0f92, ASSIGN 2024-12-13T07:34:52,713 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=339c036edb1d57cbb52ae3ff753d0f92, ASSIGN; state=MERGED, location=9eb75de94ef3,42189,1734075182526; forceNewPlan=false, retain=false 2024-12-13T07:34:52,864 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-13T07:34:52,864 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=339c036edb1d57cbb52ae3ff753d0f92, regionState=OPENING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:34:52,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure 339c036edb1d57cbb52ae3ff753d0f92, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:34:53,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-13T07:34:53,018 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:34:53,022 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. 2024-12-13T07:34:53,022 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => 339c036edb1d57cbb52ae3ff753d0f92, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92.', STARTKEY => '', ENDKEY => ''} 2024-12-13T07:34:53,022 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. service=AccessControlService 2024-12-13T07:34:53,022 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T07:34:53,022 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:34:53,022 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:34:53,022 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for 339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:34:53,022 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for 339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:34:53,024 INFO [StoreOpener-339c036edb1d57cbb52ae3ff753d0f92-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:34:53,025 INFO [StoreOpener-339c036edb1d57cbb52ae3ff753d0f92-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 339c036edb1d57cbb52ae3ff753d0f92 columnFamilyName cf 2024-12-13T07:34:53,025 DEBUG [StoreOpener-339c036edb1d57cbb52ae3ff753d0f92-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:34:53,042 DEBUG [StoreOpener-339c036edb1d57cbb52ae3ff753d0f92-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/cf/24e23b0f728f4780b814af881d6cdce9.ad255285098bb47b349dd9eae190ab75->hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/cf/24e23b0f728f4780b814af881d6cdce9-top 2024-12-13T07:34:53,047 DEBUG [StoreOpener-339c036edb1d57cbb52ae3ff753d0f92-1 {}] regionserver.StoreEngine(277): loaded 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/cf/60b89b87ee48482a8212c102e9dbf69f.9621f34eb1025da4db4761db2e5003d8->hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/cf/60b89b87ee48482a8212c102e9dbf69f-top 2024-12-13T07:34:53,048 INFO [StoreOpener-339c036edb1d57cbb52ae3ff753d0f92-1 {}] regionserver.HStore(327): Store=339c036edb1d57cbb52ae3ff753d0f92/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:34:53,049 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:34:53,050 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:34:53,052 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for 339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:34:53,053 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened 339c036edb1d57cbb52ae3ff753d0f92; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72472555, jitterRate=0.07992522418498993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:34:53,054 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for 339c036edb1d57cbb52ae3ff753d0f92: 2024-12-13T07:34:53,054 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92., pid=121, masterSystemTime=1734075293018 2024-12-13T07:34:53,055 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92.,because compaction is disabled. 2024-12-13T07:34:53,056 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. 2024-12-13T07:34:53,057 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. 
2024-12-13T07:34:53,057 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=339c036edb1d57cbb52ae3ff753d0f92, regionState=OPEN, openSeqNum=9, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:34:53,059 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-13T07:34:53,059 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure 339c036edb1d57cbb52ae3ff753d0f92, server=9eb75de94ef3,42189,1734075182526 in 192 msec 2024-12-13T07:34:53,060 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=115 2024-12-13T07:34:53,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=339c036edb1d57cbb52ae3ff753d0f92, ASSIGN in 347 msec 2024-12-13T07:34:53,061 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[ad255285098bb47b349dd9eae190ab75, 9621f34eb1025da4db4761db2e5003d8], force=true in 660 msec 2024-12-13T07:34:53,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-13T07:34:53,510 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 115 completed 2024-12-13T07:34:53,512 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-13T07:34:53,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075293512 (current time:1734075293512). 
2024-12-13T07:34:53,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:34:53,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-13T07:34:53,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:34:53,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2423a43e to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@377e153 2024-12-13T07:34:53,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65ceccd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:34:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:53,749 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35696, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:53,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2423a43e to 127.0.0.1:59810 2024-12-13T07:34:53,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:34:53,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5dd26bea to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6193747a 2024-12-13T07:34:54,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c8d601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:34:54,139 DEBUG [hconnection-0x2d84eecc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:54,141 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35706, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:54,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:34:54,145 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54468, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:34:54,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x5dd26bea to 127.0.0.1:59810 2024-12-13T07:34:54,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:34:54,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-13T07:34:54,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:34:54,278 INFO [AsyncFSWAL-0-hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData-prefix:9eb75de94ef3,33713,1734075181583 {}] wal.AbstractFSWAL(1183): Slow sync cost: 130 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40823,DS-addf9c7a-b733-4006-9870-a44eb3beb221,DISK], DatanodeInfoWithStorage[127.0.0.1:34409,DS-76d4a548-41e1-4f97-8915-a3f81cb2319e,DISK], DatanodeInfoWithStorage[127.0.0.1:33299,DS-3366fd89-6dd0-4619-a690-b626ebfdf962,DISK]] 2024-12-13T07:34:54,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-13T07:34:54,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-13T07:34:54,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-13T07:34:54,280 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:34:54,281 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:34:54,284 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:34:54,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742121_1297 (size=216) 2024-12-13T07:34:54,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742121_1297 (size=216) 2024-12-13T07:34:54,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33299 is added to blk_1073742121_1297 (size=216) 2024-12-13T07:34:54,292 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:34:54,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 339c036edb1d57cbb52ae3ff753d0f92}] 2024-12-13T07:34:54,293 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:34:54,324 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0005_000001 (auth:SIMPLE) from 127.0.0.1:39752 2024-12-13T07:34:54,334 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_3/usercache/jenkins/appcache/application_1734075189398_0005/container_1734075189398_0005_01_000001/launch_container.sh] 2024-12-13T07:34:54,334 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_3/usercache/jenkins/appcache/application_1734075189398_0005/container_1734075189398_0005_01_000001/container_tokens] 2024-12-13T07:34:54,334 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_3/usercache/jenkins/appcache/application_1734075189398_0005/container_1734075189398_0005_01_000001/sysfs] 2024-12-13T07:34:54,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-13T07:34:54,444 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:34:54,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42189 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-13T07:34:54,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. 
2024-12-13T07:34:54,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 339c036edb1d57cbb52ae3ff753d0f92: 2024-12-13T07:34:54,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-13T07:34:54,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:34:54,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:34:54,447 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/cf/24e23b0f728f4780b814af881d6cdce9.ad255285098bb47b349dd9eae190ab75->hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/cf/24e23b0f728f4780b814af881d6cdce9-top, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/cf/60b89b87ee48482a8212c102e9dbf69f.9621f34eb1025da4db4761db2e5003d8->hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/cf/60b89b87ee48482a8212c102e9dbf69f-top] hfiles 2024-12-13T07:34:54,447 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/cf/24e23b0f728f4780b814af881d6cdce9.ad255285098bb47b349dd9eae190ab75 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:34:54,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/cf/60b89b87ee48482a8212c102e9dbf69f.9621f34eb1025da4db4761db2e5003d8 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:34:54,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742122_1298 (size=269) 2024-12-13T07:34:54,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is 
added to blk_1073742122_1298 (size=269) 2024-12-13T07:34:54,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742122_1298 (size=269) 2024-12-13T07:34:54,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. 2024-12-13T07:34:54,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-13T07:34:54,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-13T07:34:54,459 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:34:54,460 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:34:54,462 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-13T07:34:54,462 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:34:54,462 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; SnapshotRegionProcedure 339c036edb1d57cbb52ae3ff753d0f92 in 168 msec 2024-12-13T07:34:54,463 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:34:54,463 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:34:54,463 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:34:54,464 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:34:54,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742123_1299 (size=670) 2024-12-13T07:34:54,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40823 is added to blk_1073742123_1299 (size=670) 2024-12-13T07:34:54,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742123_1299 (size=670) 2024-12-13T07:34:54,472 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:34:54,478 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:34:54,478 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:34:54,479 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:34:54,480 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-13T07:34:54,480 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 332 msec 2024-12-13T07:34:54,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-13T07:34:54,583 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 122 completed 2024-12-13T07:34:54,583 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075294583 2024-12-13T07:34:54,583 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41477, tgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075294583, rawTgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075294583, srcFsUri=hdfs://localhost:41477, srcDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 
2024-12-13T07:34:54,611 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41477, inputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:34:54,611 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075294583, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075294583/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:34:54,613 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T07:34:54,617 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075294583/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:34:54,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742124_1300 (size=216) 2024-12-13T07:34:54,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742124_1300 (size=216) 2024-12-13T07:34:54,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742125_1301 (size=670) 2024-12-13T07:34:54,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742124_1300 (size=216) 2024-12-13T07:34:54,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742125_1301 (size=670) 2024-12-13T07:34:54,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742125_1301 (size=670) 2024-12-13T07:34:54,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:54,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:54,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:54,629 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:55,493 DEBUG [Time-limited 
test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-10207463681074285922.jar 2024-12-13T07:34:55,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:55,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:55,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-8757241163822310744.jar 2024-12-13T07:34:55,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:55,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:55,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:55,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:55,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:55,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T07:34:55,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T07:34:55,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T07:34:55,554 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T07:34:55,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T07:34:55,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T07:34:55,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T07:34:55,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T07:34:55,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T07:34:55,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T07:34:55,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T07:34:55,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T07:34:55,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T07:34:55,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:55,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:55,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:34:55,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:55,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:34:55,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:34:55,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:34:55,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742126_1302 (size=127628) 2024-12-13T07:34:55,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742126_1302 (size=127628) 2024-12-13T07:34:55,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742126_1302 (size=127628) 2024-12-13T07:34:55,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742127_1303 (size=2172137) 2024-12-13T07:34:55,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742127_1303 (size=2172137) 2024-12-13T07:34:55,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742127_1303 (size=2172137) 2024-12-13T07:34:55,624 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:34:55,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742128_1304 (size=213228) 2024-12-13T07:34:55,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742128_1304 (size=213228) 2024-12-13T07:34:55,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742128_1304 (size=213228) 2024-12-13T07:34:55,642 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742129_1305 (size=1877034) 2024-12-13T07:34:55,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742129_1305 (size=1877034) 2024-12-13T07:34:55,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742129_1305 (size=1877034) 2024-12-13T07:34:55,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742130_1306 (size=533455) 2024-12-13T07:34:55,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742130_1306 (size=533455) 2024-12-13T07:34:55,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742130_1306 (size=533455) 2024-12-13T07:34:55,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742131_1307 (size=7280644) 2024-12-13T07:34:55,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742131_1307 (size=7280644) 2024-12-13T07:34:55,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742131_1307 (size=7280644) 2024-12-13T07:34:55,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742132_1308 (size=4188619) 2024-12-13T07:34:55,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742132_1308 (size=4188619) 2024-12-13T07:34:55,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742132_1308 (size=4188619) 2024-12-13T07:34:55,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742133_1309 (size=6350899) 2024-12-13T07:34:55,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742133_1309 (size=6350899) 2024-12-13T07:34:55,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742133_1309 (size=6350899) 2024-12-13T07:34:55,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742134_1310 (size=20406) 2024-12-13T07:34:55,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742134_1310 (size=20406) 2024-12-13T07:34:55,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742134_1310 (size=20406) 2024-12-13T07:34:55,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742135_1311 (size=75495) 2024-12-13T07:34:55,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742135_1311 (size=75495) 2024-12-13T07:34:55,756 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742135_1311 (size=75495) 2024-12-13T07:34:55,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742136_1312 (size=45609) 2024-12-13T07:34:55,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742136_1312 (size=45609) 2024-12-13T07:34:55,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742136_1312 (size=45609) 2024-12-13T07:34:55,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742137_1313 (size=110084) 2024-12-13T07:34:55,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742137_1313 (size=110084) 2024-12-13T07:34:55,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742137_1313 (size=110084) 2024-12-13T07:34:55,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742138_1314 (size=1323991) 2024-12-13T07:34:55,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742138_1314 (size=1323991) 2024-12-13T07:34:55,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742138_1314 (size=1323991) 2024-12-13T07:34:55,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742139_1315 (size=23076) 2024-12-13T07:34:55,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742139_1315 (size=23076) 2024-12-13T07:34:55,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742139_1315 (size=23076) 2024-12-13T07:34:55,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742140_1316 (size=126803) 2024-12-13T07:34:55,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742140_1316 (size=126803) 2024-12-13T07:34:55,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742140_1316 (size=126803) 2024-12-13T07:34:55,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742141_1317 (size=322274) 2024-12-13T07:34:55,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742141_1317 (size=322274) 2024-12-13T07:34:55,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742141_1317 (size=322274) 2024-12-13T07:34:55,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742142_1318 (size=451756) 2024-12-13T07:34:55,848 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742142_1318 (size=451756) 2024-12-13T07:34:55,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742142_1318 (size=451756) 2024-12-13T07:34:55,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742143_1319 (size=1832290) 2024-12-13T07:34:55,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742143_1319 (size=1832290) 2024-12-13T07:34:55,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742143_1319 (size=1832290) 2024-12-13T07:34:56,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742144_1320 (size=30081) 2024-12-13T07:34:56,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742144_1320 (size=30081) 2024-12-13T07:34:56,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742144_1320 (size=30081) 2024-12-13T07:34:56,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742145_1321 (size=53616) 2024-12-13T07:34:56,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742145_1321 (size=53616) 2024-12-13T07:34:56,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742145_1321 (size=53616) 2024-12-13T07:34:56,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742146_1322 (size=29229) 2024-12-13T07:34:56,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742146_1322 (size=29229) 2024-12-13T07:34:56,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742146_1322 (size=29229) 2024-12-13T07:34:56,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742147_1323 (size=169089) 2024-12-13T07:34:56,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742147_1323 (size=169089) 2024-12-13T07:34:56,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742147_1323 (size=169089) 2024-12-13T07:34:56,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742148_1324 (size=5175431) 2024-12-13T07:34:56,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742148_1324 (size=5175431) 2024-12-13T07:34:56,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742148_1324 (size=5175431) 
2024-12-13T07:34:56,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742149_1325 (size=136454) 2024-12-13T07:34:56,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742149_1325 (size=136454) 2024-12-13T07:34:56,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742149_1325 (size=136454) 2024-12-13T07:34:56,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742150_1326 (size=907854) 2024-12-13T07:34:56,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742150_1326 (size=907854) 2024-12-13T07:34:56,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742150_1326 (size=907854) 2024-12-13T07:34:56,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742151_1327 (size=3317408) 2024-12-13T07:34:56,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742151_1327 (size=3317408) 2024-12-13T07:34:56,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742151_1327 (size=3317408) 2024-12-13T07:34:56,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742152_1328 (size=503880) 2024-12-13T07:34:56,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742152_1328 (size=503880) 2024-12-13T07:34:56,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742152_1328 (size=503880) 2024-12-13T07:34:56,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742153_1329 (size=4695811) 2024-12-13T07:34:56,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742153_1329 (size=4695811) 2024-12-13T07:34:56,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742153_1329 (size=4695811) 2024-12-13T07:34:56,365 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
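
The "For class <X>, using jar <Y>" lines above (TableMapReduceUtil(923)), followed by the addStoredBlock lines and the JobResourceUploader warning, are the dependency-jar shipping step of the MapReduce job submission: each class the tasks will need is mapped to the jar that contains it, and those jars are then written into the mini-DFS as part of the job's distributed cache. The following is only an illustrative sketch of the public API behind that step, not code from this test, assuming a driver that has already built a Job:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class AddDependencyJarsSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jar-sketch"); // hypothetical job name
        // Resolves the jar containing each class the job needs (HBase modules,
        // ZooKeeper, protobuf, metrics, OpenTelemetry, ...) and adds it to the
        // job's distributed cache, which is what produces "For class X, using
        // jar Y" log lines like the ones above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }
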
2024-12-13T07:34:56,367 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-13T07:34:56,368 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=9.7 K 2024-12-13T07:34:56,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742154_1330 (size=378) 2024-12-13T07:34:56,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742154_1330 (size=378) 2024-12-13T07:34:56,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742154_1330 (size=378) 2024-12-13T07:34:56,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742155_1331 (size=15) 2024-12-13T07:34:56,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742155_1331 (size=15) 2024-12-13T07:34:56,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742155_1331 (size=15) 2024-12-13T07:34:56,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742156_1332 (size=304942) 2024-12-13T07:34:56,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742156_1332 (size=304942) 2024-12-13T07:34:56,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742156_1332 (size=304942) 2024-12-13T07:34:56,409 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:34:56,409 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:34:56,888 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0006_000001 (auth:SIMPLE) from 127.0.0.1:42588 2024-12-13T07:35:00,631 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
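
The ExportSnapshot(658) and ExportSnapshot(751) lines above show the export loading the snapshot's hfile list and computing a single input split of about 9.7 K before the MapReduce application (application_1734075189398_0006) runs. As an illustration only, and not this test's own code, the same tool can be driven programmatically; the destination URI below is a made-up placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to the documented CLI form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 -copy-to <dest>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to", "hdfs://dest-cluster/hbase", // placeholder destination, not from this log
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }
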
2024-12-13T07:35:01,604 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0006_000001 (auth:SIMPLE) from 127.0.0.1:59262 2024-12-13T07:35:01,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742157_1333 (size=350616) 2024-12-13T07:35:01,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742157_1333 (size=350616) 2024-12-13T07:35:02,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742157_1333 (size=350616) 2024-12-13T07:35:02,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:02,021 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-13T07:35:03,854 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0006_000001 (auth:SIMPLE) from 127.0.0.1:55898 2024-12-13T07:35:05,801 DEBUG [master/9eb75de94ef3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 2a8e991f17b65aad4b88bb288fe67388 changed from -1.0 to 0.0, refreshing cache 2024-12-13T07:35:05,801 DEBUG [master/9eb75de94ef3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 8048339c00f55b04bd8357c5a420a285 changed from -1.0 to 0.0, refreshing cache 2024-12-13T07:35:06,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742158_1334 (size=4945) 2024-12-13T07:35:06,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742158_1334 (size=4945) 2024-12-13T07:35:06,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742158_1334 (size=4945) 2024-12-13T07:35:06,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742159_1335 (size=4945) 2024-12-13T07:35:06,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742159_1335 (size=4945) 2024-12-13T07:35:06,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742159_1335 (size=4945) 2024-12-13T07:35:06,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742160_1336 (size=17474) 2024-12-13T07:35:06,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742160_1336 (size=17474) 2024-12-13T07:35:06,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742160_1336 (size=17474) 2024-12-13T07:35:06,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742161_1337 
(size=482) 2024-12-13T07:35:06,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742161_1337 (size=482) 2024-12-13T07:35:06,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742161_1337 (size=482) 2024-12-13T07:35:06,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742162_1338 (size=17474) 2024-12-13T07:35:06,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742162_1338 (size=17474) 2024-12-13T07:35:06,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742162_1338 (size=17474) 2024-12-13T07:35:06,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742163_1339 (size=350616) 2024-12-13T07:35:06,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742163_1339 (size=350616) 2024-12-13T07:35:06,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742163_1339 (size=350616) 2024-12-13T07:35:06,594 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0006/container_1734075189398_0006_01_000002/launch_container.sh] 2024-12-13T07:35:06,594 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0006/container_1734075189398_0006_01_000002/container_tokens] 2024-12-13T07:35:06,595 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_3/usercache/jenkins/appcache/application_1734075189398_0006/container_1734075189398_0006_01_000002/sysfs] 2024-12-13T07:35:06,598 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0006_000001 (auth:SIMPLE) from 127.0.0.1:55900 2024-12-13T07:35:07,849 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T07:35:07,849 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-13T07:35:07,856 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:07,856 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T07:35:07,856 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T07:35:07,857 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:07,857 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-13T07:35:07,857 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-13T07:35:07,857 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075294583/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075294583/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:07,857 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075294583/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-13T07:35:07,857 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075294583/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-13T07:35:07,864 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:07,864 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:07,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:07,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T07:35:07,866 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075307866"}]},"ts":"1734075307866"} 2024-12-13T07:35:07,867 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-13T07:35:07,914 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-13T07:35:07,915 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-13T07:35:07,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=339c036edb1d57cbb52ae3ff753d0f92, UNASSIGN}] 2024-12-13T07:35:07,920 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=339c036edb1d57cbb52ae3ff753d0f92, UNASSIGN 2024-12-13T07:35:07,921 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=339c036edb1d57cbb52ae3ff753d0f92, regionState=CLOSING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:07,922 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:35:07,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure 339c036edb1d57cbb52ae3ff753d0f92, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:35:07,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T07:35:08,074 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:08,074 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close 339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:35:08,074 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:35:08,075 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing 339c036edb1d57cbb52ae3ff753d0f92, disabling compactions & flushes 2024-12-13T07:35:08,075 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. 2024-12-13T07:35:08,075 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. 
2024-12-13T07:35:08,075 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. after waiting 0 ms 2024-12-13T07:35:08,075 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. 2024-12-13T07:35:08,079 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-13T07:35:08,080 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:35:08,080 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92. 2024-12-13T07:35:08,080 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for 339c036edb1d57cbb52ae3ff753d0f92: 2024-12-13T07:35:08,082 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed 339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:35:08,083 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=339c036edb1d57cbb52ae3ff753d0f92, regionState=CLOSED 2024-12-13T07:35:08,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-13T07:35:08,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure 339c036edb1d57cbb52ae3ff753d0f92, server=9eb75de94ef3,42189,1734075182526 in 164 msec 2024-12-13T07:35:08,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-13T07:35:08,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=339c036edb1d57cbb52ae3ff753d0f92, UNASSIGN in 170 msec 2024-12-13T07:35:08,091 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-13T07:35:08,091 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 174 msec 2024-12-13T07:35:08,092 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075308092"}]},"ts":"1734075308092"} 2024-12-13T07:35:08,094 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in 
hbase:meta 2024-12-13T07:35:08,102 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-13T07:35:08,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 239 msec 2024-12-13T07:35:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T07:35:08,169 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 124 completed 2024-12-13T07:35:08,170 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,171 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,174 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,174 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,177 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:35:08,177 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:35:08,177 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75 2024-12-13T07:35:08,178 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/recovered.edits] 2024-12-13T07:35:08,178 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/recovered.edits] 2024-12-13T07:35:08,178 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/recovered.edits] 2024-12-13T07:35:08,183 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/cf/60b89b87ee48482a8212c102e9dbf69f to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/cf/60b89b87ee48482a8212c102e9dbf69f 2024-12-13T07:35:08,184 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/cf/24e23b0f728f4780b814af881d6cdce9 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/cf/24e23b0f728f4780b814af881d6cdce9 2024-12-13T07:35:08,184 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/cf/24e23b0f728f4780b814af881d6cdce9.ad255285098bb47b349dd9eae190ab75 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/cf/24e23b0f728f4780b814af881d6cdce9.ad255285098bb47b349dd9eae190ab75 2024-12-13T07:35:08,184 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/cf/60b89b87ee48482a8212c102e9dbf69f.9621f34eb1025da4db4761db2e5003d8 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/cf/60b89b87ee48482a8212c102e9dbf69f.9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:35:08,187 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/recovered.edits/8.seqid to 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8/recovered.edits/8.seqid 2024-12-13T07:35:08,188 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/recovered.edits/12.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92/recovered.edits/12.seqid 2024-12-13T07:35:08,188 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9621f34eb1025da4db4761db2e5003d8 2024-12-13T07:35:08,189 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/recovered.edits/8.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75/recovered.edits/8.seqid 2024-12-13T07:35:08,189 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/339c036edb1d57cbb52ae3ff753d0f92 2024-12-13T07:35:08,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,189 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ad255285098bb47b349dd9eae190ab75 2024-12-13T07:35:08,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,189 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-13T07:35:08,190 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-13T07:35:08,190 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-13T07:35:08,190 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-13T07:35:08,190 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-13T07:35:08,192 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,196 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-13T07:35:08,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:08,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:08,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:08,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:08,198 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 
2024-12-13T07:35:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-13T07:35:08,199 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:08,199 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:08,200 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:08,200 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,200 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-13T07:35:08,201 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075308200"}]},"ts":"9223372036854775807"} 2024-12-13T07:35:08,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:08,203 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-13T07:35:08,203 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 339c036edb1d57cbb52ae3ff753d0f92, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92.', STARTKEY => '', ENDKEY => ''}] 2024-12-13T07:35:08,203 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
2024-12-13T07:35:08,203 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734075308203"}]},"ts":"9223372036854775807"} 2024-12-13T07:35:08,210 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-13T07:35:08,220 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 50 msec 2024-12-13T07:35:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-13T07:35:08,301 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 128 completed 2024-12-13T07:35:08,301 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,301 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-13T07:35:08,305 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075308305"}]},"ts":"1734075308305"} 2024-12-13T07:35:08,306 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-13T07:35:08,342 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-13T07:35:08,343 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-13T07:35:08,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2a8e991f17b65aad4b88bb288fe67388, UNASSIGN}, {pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8048339c00f55b04bd8357c5a420a285, UNASSIGN}] 2024-12-13T07:35:08,349 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; 
TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8048339c00f55b04bd8357c5a420a285, UNASSIGN 2024-12-13T07:35:08,349 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2a8e991f17b65aad4b88bb288fe67388, UNASSIGN 2024-12-13T07:35:08,350 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=8048339c00f55b04bd8357c5a420a285, regionState=CLOSING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:08,350 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=2a8e991f17b65aad4b88bb288fe67388, regionState=CLOSING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:08,352 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:35:08,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=131, state=RUNNABLE; CloseRegionProcedure 2a8e991f17b65aad4b88bb288fe67388, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:35:08,353 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:35:08,353 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=132, state=RUNNABLE; CloseRegionProcedure 8048339c00f55b04bd8357c5a420a285, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:35:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-13T07:35:08,505 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:08,505 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:08,505 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:35:08,505 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(124): Close 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:35:08,505 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:35:08,505 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:35:08,506 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1681): Closing 8048339c00f55b04bd8357c5a420a285, disabling compactions & flushes 2024-12-13T07:35:08,506 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing 2a8e991f17b65aad4b88bb288fe67388, disabling compactions & flushes 2024-12-13T07:35:08,506 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1703): Closing region 
testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:35:08,506 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:35:08,506 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:35:08,506 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:35:08,506 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. after waiting 0 ms 2024-12-13T07:35:08,506 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. after waiting 0 ms 2024-12-13T07:35:08,506 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 2024-12-13T07:35:08,506 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:35:08,510 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:35:08,511 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:35:08,511 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285. 
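The "Wrote file=.../recovered.edits/9.seqid, newMaxSeqId=9" lines above show that the sequence id is encoded directly in the marker file's name. A hypothetical helper that recovers it by listing the directory (the production logic lives in HBase's wal.WALSplitUtil; this is only to illustrate what the 9.seqid marker carries):

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class SeqIdMarkers {
      /** Returns the highest N found among N.seqid marker files, or -1 if none exist. */
      static long maxSeqIdFromMarkers(FileSystem fs, Path recoveredEdits) throws Exception {
        long max = -1L;
        for (FileStatus st : fs.listStatus(recoveredEdits)) {
          String name = st.getPath().getName();          // e.g. "9.seqid"
          if (name.endsWith(".seqid")) {
            max = Math.max(max,
                Long.parseLong(name.substring(0, name.length() - ".seqid".length())));
          }
        }
        return max;
      }
    }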
2024-12-13T07:35:08,511 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1635): Region close journal for 8048339c00f55b04bd8357c5a420a285: 2024-12-13T07:35:08,512 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:35:08,513 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:35:08,513 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388. 2024-12-13T07:35:08,513 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for 2a8e991f17b65aad4b88bb288fe67388: 2024-12-13T07:35:08,513 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(170): Closed 8048339c00f55b04bd8357c5a420a285 2024-12-13T07:35:08,514 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=8048339c00f55b04bd8357c5a420a285, regionState=CLOSED 2024-12-13T07:35:08,515 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed 2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:35:08,516 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=2a8e991f17b65aad4b88bb288fe67388, regionState=CLOSED 2024-12-13T07:35:08,517 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=132 2024-12-13T07:35:08,520 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8048339c00f55b04bd8357c5a420a285, UNASSIGN in 169 msec 2024-12-13T07:35:08,520 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=132, state=SUCCESS; CloseRegionProcedure 8048339c00f55b04bd8357c5a420a285, server=9eb75de94ef3,41895,1734075182430 in 162 msec 2024-12-13T07:35:08,521 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=131 2024-12-13T07:35:08,521 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=131, state=SUCCESS; CloseRegionProcedure 2a8e991f17b65aad4b88bb288fe67388, server=9eb75de94ef3,36021,1734075182342 in 165 msec 2024-12-13T07:35:08,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-13T07:35:08,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=2a8e991f17b65aad4b88bb288fe67388, UNASSIGN in 173 msec 2024-12-13T07:35:08,524 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 
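The RegionStateStore lines above ("updating hbase:meta row=..., regionState=CLOSED") persist each region's state into hbase:meta. A rough sketch of reading that column back with the client API, assuming the info:state qualifier used by HBase 2.x meta; the per-table row bounds here are a crude approximation, not the exact range HBase computes:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MetaRegionStates {
      // Prints the info:state cell kept in hbase:meta for each region row of a table.
      static void dump(Connection conn, String table) throws Exception {
        byte[] info = Bytes.toBytes("info");
        byte[] state = Bytes.toBytes("state");
        Scan scan = new Scan()
            .withStartRow(Bytes.toBytes(table + ","))
            .withStopRow(Bytes.toBytes(table + ",\uffff"))   // crude per-table upper bound
            .addColumn(info, state);
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner rs = meta.getScanner(scan)) {
          for (Result r : rs) {
            System.out.println(Bytes.toString(r.getRow()) + " -> "
                + Bytes.toString(r.getValue(info, state)));
          }
        }
      }
    }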
2024-12-13T07:35:08,524 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 179 msec 2024-12-13T07:35:08,525 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075308525"}]},"ts":"1734075308525"} 2024-12-13T07:35:08,526 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-13T07:35:08,539 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-13T07:35:08,541 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 239 msec 2024-12-13T07:35:08,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-13T07:35:08,607 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 129 completed 2024-12-13T07:35:08,607 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,609 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,610 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=135, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,611 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,613 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285 2024-12-13T07:35:08,613 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:35:08,614 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/recovered.edits] 2024-12-13T07:35:08,614 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/recovered.edits] 2024-12-13T07:35:08,618 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/cf/1041b75041e848b89d349da1244cade3 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/cf/1041b75041e848b89d349da1244cade3 2024-12-13T07:35:08,619 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/cf/5144c7510a22492a85638ec3fe19a16f to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/cf/5144c7510a22492a85638ec3fe19a16f 2024-12-13T07:35:08,622 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388/recovered.edits/9.seqid 2024-12-13T07:35:08,622 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285/recovered.edits/9.seqid 2024-12-13T07:35:08,622 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/2a8e991f17b65aad4b88bb288fe67388 2024-12-13T07:35:08,622 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithMergeRegion/8048339c00f55b04bd8357c5a420a285 
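The HFileArchiver lines above relocate store files and recovered.edits from the table's data directory into the archive tree while keeping the table/region/family layout intact. A sketch of that path mapping only (names and helper are illustrative; the real relocation is done by backup.HFileArchiver as logged):

    import org.apache.hadoop.fs.Path;

    final class ArchivePaths {
      // Maps <root>/data/<ns>/<table>/<region>/... to <root>/archive/data/<ns>/<table>/<region>/...
      // The input file is assumed to live under rootDir.
      static Path toArchivePath(Path rootDir, Path fileUnderData) {
        String rel = fileUnderData.toString().substring(rootDir.toString().length() + 1);
        return new Path(new Path(rootDir, "archive"), rel);
      }
    }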
2024-12-13T07:35:08,622 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-13T07:35:08,624 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=135, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,627 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-13T07:35:08,628 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-13T07:35:08,628 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-13T07:35:08,628 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-13T07:35:08,630 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-13T07:35:08,631 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-13T07:35:08,631 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=135, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,631 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
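The meta Delete mutations recorded during this teardown (above for the "-1" table and just below for this one) carry "ts":"9223372036854775807", which is Long.MAX_VALUE, i.e. HConstants.LATEST_TIMESTAMP. A stand-alone sketch of a Delete shaped like the logged table:state removal (illustrative only; HBase builds these internally in MetaTableAccessor):

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MetaDeletes {
      // Deletes all versions of the table:state cell on the table's meta row,
      // using LATEST_TIMESTAMP as the mutation timestamp like the logged Delete.
      static Delete tableStateDelete(String tableRow) {
        Delete d = new Delete(Bytes.toBytes(tableRow), HConstants.LATEST_TIMESTAMP);
        d.addColumns(Bytes.toBytes("table"), Bytes.toBytes("state"));
        return d;
      }
    }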
2024-12-13T07:35:08,631 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075308631"}]},"ts":"9223372036854775807"} 2024-12-13T07:35:08,632 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075308631"}]},"ts":"9223372036854775807"} 2024-12-13T07:35:08,633 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T07:35:08,633 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2a8e991f17b65aad4b88bb288fe67388, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734075290403.2a8e991f17b65aad4b88bb288fe67388.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 8048339c00f55b04bd8357c5a420a285, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734075290403.8048339c00f55b04bd8357c5a420a285.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T07:35:08,634 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-12-13T07:35:08,634 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734075308634"}]},"ts":"9223372036854775807"} 2024-12-13T07:35:08,635 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-13T07:35:08,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:08,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:08,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:08,636 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:08,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-13T07:35:08,645 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=135, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,646 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 38 msec 2024-12-13T07:35:08,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-13T07:35:08,738 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 135 completed 2024-12-13T07:35:08,746 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-13T07:35:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,749 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-13T07:35:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:08,752 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" 2024-12-13T07:35:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:08,775 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=805 (was 800) Potentially hanging thread: hconnection-0x6218feab-shared-pool-33 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-31 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34193 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-29 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-30 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1658146201) connection to localhost/127.0.0.1:34193 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/9eb75de94ef3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:49152 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:57044 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/9eb75de94ef3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-32 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4856 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/9eb75de94ef3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-28 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:35660 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 67329) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) 
java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_542945426_1 at /127.0.0.1:57012 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=812 (was 807) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=475 (was 544), ProcessCount=20 (was 17) - ProcessCount LEAK? 
-, AvailableMemoryMB=6228 (was 6565) 2024-12-13T07:35:08,775 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-12-13T07:35:08,796 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=805, OpenFileDescriptor=812, MaxFileDescriptor=1048576, SystemLoadAverage=475, ProcessCount=20, AvailableMemoryMB=6227 2024-12-13T07:35:08,796 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-12-13T07:35:08,798 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:35:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T07:35:08,799 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:35:08,799 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:08,799 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 136 2024-12-13T07:35:08,800 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:35:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T07:35:08,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742164_1340 (size=407) 2024-12-13T07:35:08,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742164_1340 (size=407) 2024-12-13T07:35:08,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742164_1340 (size=407) 2024-12-13T07:35:08,817 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 0cadc788bc38c378b787524bce4633b8, NAME => 'testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:08,817 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e7d1f4a6bda4a3ce579351e28dad5381, NAME => 'testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:08,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742165_1341 (size=68) 2024-12-13T07:35:08,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742165_1341 (size=68) 2024-12-13T07:35:08,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742165_1341 (size=68) 2024-12-13T07:35:08,830 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:08,830 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 0cadc788bc38c378b787524bce4633b8, disabling compactions & flushes 2024-12-13T07:35:08,830 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 2024-12-13T07:35:08,830 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 2024-12-13T07:35:08,830 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. after waiting 0 ms 2024-12-13T07:35:08,830 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 2024-12-13T07:35:08,830 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 
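For reference, a hedged client-side sketch of the create call logged above ("create 'testtb-testExportExpiredSnapshot', ..."): a single 'cf' family with VERSIONS => '1' and one split point "1", which is what yields the two regions ('' -> '1' and '1' -> '') being initialized here. The remaining attributes in the logged descriptor are defaults and are omitted:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateTableSketch {
      static void createExpiredSnapshotTable(Admin admin) throws Exception {
        TableDescriptorBuilder t = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)        // VERSIONS => '1' from the logged descriptor
                .build());
        byte[][] splitKeys = { Bytes.toBytes("1") };   // produces regions ''->'1' and '1'->''
        admin.createTable(t.build(), splitKeys);
      }
    }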
2024-12-13T07:35:08,830 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 0cadc788bc38c378b787524bce4633b8: 2024-12-13T07:35:08,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742166_1342 (size=68) 2024-12-13T07:35:08,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742166_1342 (size=68) 2024-12-13T07:35:08,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742166_1342 (size=68) 2024-12-13T07:35:08,833 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:08,833 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing e7d1f4a6bda4a3ce579351e28dad5381, disabling compactions & flushes 2024-12-13T07:35:08,834 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:08,834 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:08,834 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. after waiting 0 ms 2024-12-13T07:35:08,834 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:08,834 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 
2024-12-13T07:35:08,834 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for e7d1f4a6bda4a3ce579351e28dad5381: 2024-12-13T07:35:08,836 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:35:08,836 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734075308836"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075308836"}]},"ts":"1734075308836"} 2024-12-13T07:35:08,836 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734075308836"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075308836"}]},"ts":"1734075308836"} 2024-12-13T07:35:08,838 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T07:35:08,839 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:35:08,839 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075308839"}]},"ts":"1734075308839"} 2024-12-13T07:35:08,840 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-13T07:35:08,856 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:35:08,857 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:35:08,857 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:35:08,857 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:35:08,857 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:35:08,857 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:35:08,857 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:35:08,857 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:35:08,858 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e7d1f4a6bda4a3ce579351e28dad5381, ASSIGN}, {pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0cadc788bc38c378b787524bce4633b8, ASSIGN}] 2024-12-13T07:35:08,859 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportExpiredSnapshot, region=e7d1f4a6bda4a3ce579351e28dad5381, ASSIGN 2024-12-13T07:35:08,859 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0cadc788bc38c378b787524bce4633b8, ASSIGN 2024-12-13T07:35:08,859 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e7d1f4a6bda4a3ce579351e28dad5381, ASSIGN; state=OFFLINE, location=9eb75de94ef3,42189,1734075182526; forceNewPlan=false, retain=false 2024-12-13T07:35:08,859 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0cadc788bc38c378b787524bce4633b8, ASSIGN; state=OFFLINE, location=9eb75de94ef3,36021,1734075182342; forceNewPlan=false, retain=false 2024-12-13T07:35:08,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T07:35:09,010 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:35:09,010 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=0cadc788bc38c378b787524bce4633b8, regionState=OPENING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:09,010 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=e7d1f4a6bda4a3ce579351e28dad5381, regionState=OPENING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:09,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=137, state=RUNNABLE; OpenRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:35:09,012 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=138, state=RUNNABLE; OpenRegionProcedure 0cadc788bc38c378b787524bce4633b8, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:35:09,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T07:35:09,163 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:09,164 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:09,167 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 
2024-12-13T07:35:09,167 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7285): Opening region: {ENCODED => e7d1f4a6bda4a3ce579351e28dad5381, NAME => 'testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T07:35:09,167 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. service=AccessControlService 2024-12-13T07:35:09,167 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:35:09,167 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 2024-12-13T07:35:09,168 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7285): Opening region: {ENCODED => 0cadc788bc38c378b787524bce4633b8, NAME => 'testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T07:35:09,168 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:09,168 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:09,168 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7327): checking encryption for e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:09,168 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7330): checking classloading for e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:09,168 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. service=AccessControlService 2024-12-13T07:35:09,168 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T07:35:09,168 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:09,168 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:09,168 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7327): checking encryption for 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:09,168 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7330): checking classloading for 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:09,170 INFO [StoreOpener-e7d1f4a6bda4a3ce579351e28dad5381-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:09,170 INFO [StoreOpener-0cadc788bc38c378b787524bce4633b8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:09,171 INFO [StoreOpener-e7d1f4a6bda4a3ce579351e28dad5381-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7d1f4a6bda4a3ce579351e28dad5381 columnFamilyName cf 2024-12-13T07:35:09,171 INFO [StoreOpener-0cadc788bc38c378b787524bce4633b8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0cadc788bc38c378b787524bce4633b8 columnFamilyName cf 2024-12-13T07:35:09,172 DEBUG [StoreOpener-e7d1f4a6bda4a3ce579351e28dad5381-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:09,172 DEBUG [StoreOpener-0cadc788bc38c378b787524bce4633b8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:09,172 INFO [StoreOpener-0cadc788bc38c378b787524bce4633b8-1 {}] regionserver.HStore(327): Store=0cadc788bc38c378b787524bce4633b8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:35:09,172 INFO [StoreOpener-e7d1f4a6bda4a3ce579351e28dad5381-1 {}] regionserver.HStore(327): Store=e7d1f4a6bda4a3ce579351e28dad5381/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:35:09,173 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:09,173 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:09,173 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:09,173 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:09,175 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1085): writing seq id for 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:09,175 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1085): writing seq id for e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:09,178 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:35:09,179 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:35:09,179 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1102): Opened 
e7d1f4a6bda4a3ce579351e28dad5381; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59345909, jitterRate=-0.11567704379558563}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:35:09,179 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1102): Opened 0cadc788bc38c378b787524bce4633b8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66783371, jitterRate=-0.004850223660469055}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:35:09,180 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1001): Region open journal for 0cadc788bc38c378b787524bce4633b8: 2024-12-13T07:35:09,180 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1001): Region open journal for e7d1f4a6bda4a3ce579351e28dad5381: 2024-12-13T07:35:09,180 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8., pid=140, masterSystemTime=1734075309164 2024-12-13T07:35:09,180 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381., pid=139, masterSystemTime=1734075309163 2024-12-13T07:35:09,182 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 2024-12-13T07:35:09,182 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 2024-12-13T07:35:09,182 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=0cadc788bc38c378b787524bce4633b8, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:09,182 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:09,182 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 
2024-12-13T07:35:09,182 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=e7d1f4a6bda4a3ce579351e28dad5381, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:09,184 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=138 2024-12-13T07:35:09,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=137 2024-12-13T07:35:09,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=137, state=SUCCESS; OpenRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381, server=9eb75de94ef3,42189,1734075182526 in 172 msec 2024-12-13T07:35:09,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=138, state=SUCCESS; OpenRegionProcedure 0cadc788bc38c378b787524bce4633b8, server=9eb75de94ef3,36021,1734075182342 in 171 msec 2024-12-13T07:35:09,185 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0cadc788bc38c378b787524bce4633b8, ASSIGN in 326 msec 2024-12-13T07:35:09,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-13T07:35:09,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e7d1f4a6bda4a3ce579351e28dad5381, ASSIGN in 327 msec 2024-12-13T07:35:09,187 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:35:09,187 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075309187"}]},"ts":"1734075309187"} 2024-12-13T07:35:09,188 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-13T07:35:09,232 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:35:09,232 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-13T07:35:09,234 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-13T07:35:09,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:09,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:09,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:09,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:09,252 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:09,252 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:09,253 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:09,253 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:09,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 454 msec 2024-12-13T07:35:09,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T07:35:09,405 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 136 completed 2024-12-13T07:35:09,405 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-13T07:35:09,406 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:09,410 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-13T07:35:09,411 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:09,411 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-13T07:35:09,414 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-13T07:35:09,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075309414 (current time:1734075309414). 
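The snapshot request logged just above ({ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }) is what a client issues through the Admin API before any data has been written. A minimal sketch of that call follows; it reuses the conn/admin handles from the table-creation sketch earlier and is illustrative only, not the actual test code.

    TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
    // For an enabled table this is taken as a FLUSH-type snapshot, which is why
    // the SnapshotDescription above shows type=FLUSH with the default ttl=0.
    admin.snapshot("emptySnaptb0-testExportExpiredSnapshot", tn);

The call blocks until the SnapshotProcedure (pid=141 here) finishes, i.e. after the SNAPSHOT_COMPLETE_SNAPSHOT step further down moves the snapshot out of the .hbase-snapshot/.tmp area.
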
2024-12-13T07:35:09,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:35:09,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-13T07:35:09,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:35:09,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c5004ea to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3e3364a0 2024-12-13T07:35:09,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34defc1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:09,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:09,430 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54760, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:09,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c5004ea to 127.0.0.1:59810 2024-12-13T07:35:09,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:09,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f14befd to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6aa7d5db 2024-12-13T07:35:09,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29dafa8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:09,450 DEBUG [hconnection-0x2613eadf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:09,451 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54770, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:09,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:09,454 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46202, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:09,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x4f14befd to 127.0.0.1:59810 2024-12-13T07:35:09,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:09,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-13T07:35:09,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:35:09,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-13T07:35:09,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-13T07:35:09,457 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:35:09,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-13T07:35:09,458 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:35:09,459 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:35:09,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742167_1343 (size=170) 2024-12-13T07:35:09,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742167_1343 (size=170) 2024-12-13T07:35:09,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742167_1343 (size=170) 2024-12-13T07:35:09,465 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:35:09,466 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381}, {pid=143, ppid=141, state=RUNNABLE; 
SnapshotRegionProcedure 0cadc788bc38c378b787524bce4633b8}] 2024-12-13T07:35:09,466 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:09,466 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:09,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-13T07:35:09,617 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:09,617 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:09,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42189 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-13T07:35:09,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=143 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2538): Flush status journal for e7d1f4a6bda4a3ce579351e28dad5381: 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 0cadc788bc38c378b787524bce4633b8: 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:35:09,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:35:09,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742168_1344 (size=71) 2024-12-13T07:35:09,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742168_1344 (size=71) 2024-12-13T07:35:09,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742168_1344 (size=71) 2024-12-13T07:35:09,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 
2024-12-13T07:35:09,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-13T07:35:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=142 2024-12-13T07:35:09,626 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:09,626 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:09,627 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; SnapshotRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381 in 162 msec 2024-12-13T07:35:09,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742169_1345 (size=71) 2024-12-13T07:35:09,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742169_1345 (size=71) 2024-12-13T07:35:09,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742169_1345 (size=71) 2024-12-13T07:35:09,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 
2024-12-13T07:35:09,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-13T07:35:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-13T07:35:09,634 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:09,634 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:09,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=141 2024-12-13T07:35:09,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=141, state=SUCCESS; SnapshotRegionProcedure 0cadc788bc38c378b787524bce4633b8 in 169 msec 2024-12-13T07:35:09,636 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:35:09,636 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:35:09,637 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:35:09,637 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-13T07:35:09,637 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-13T07:35:09,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742170_1346 (size=552) 2024-12-13T07:35:09,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742170_1346 (size=552) 2024-12-13T07:35:09,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742170_1346 (size=552) 2024-12-13T07:35:09,652 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:35:09,656 INFO 
[PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:35:09,657 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-13T07:35:09,658 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:35:09,658 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-13T07:35:09,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 202 msec 2024-12-13T07:35:09,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-13T07:35:09,759 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 141 completed 2024-12-13T07:35:09,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42189 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:35:09,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:35:09,770 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-13T07:35:09,770 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:09,770 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:09,781 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-13T07:35:09,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075309781 (current time:1734075309781). 
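After the empty snapshot completes, the test loads a few rows (the "writing data to region ... with WAL disabled" entries above) and then requests a second FLUSH snapshot, snaptb0-testExportExpiredSnapshot, which forces the memstore flushes that follow. An illustrative sketch of that client-side sequence, again reusing conn/admin from the earlier sketches; the row key and value are placeholders, and only the table, the 'cf:q' column and the SKIP_WAL durability come from the log:

    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row-0"));                 // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),
          Bytes.toBytes("value-0"));                             // placeholder value
      put.setDurability(Durability.SKIP_WAL);  // matches "with WAL disabled" in the log;
                                               // data is lost if the server crashes before a flush
      table.put(put);
    }
    // The second snapshot flushes each memstore before taking region manifests,
    // producing the HFile flush entries seen at the end of this section.
    admin.snapshot("snaptb0-testExportExpiredSnapshot",
        TableName.valueOf("testtb-testExportExpiredSnapshot"));

(Table, Put and Durability are org.apache.hadoop.hbase.client types.)
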
2024-12-13T07:35:09,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:35:09,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-13T07:35:09,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:35:09,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6637c9e6 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f4b92ed 2024-12-13T07:35:09,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4192a549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:09,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:09,834 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54776, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:09,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6637c9e6 to 127.0.0.1:59810 2024-12-13T07:35:09,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:09,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3be08e31 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@383faf5a 2024-12-13T07:35:09,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c53fdf9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:09,855 DEBUG [hconnection-0xf6a1916-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:09,856 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38308, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:09,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:09,859 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45902, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:09,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x3be08e31 to 127.0.0.1:59810 2024-12-13T07:35:09,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:09,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-13T07:35:09,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:35:09,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-13T07:35:09,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-13T07:35:09,863 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:35:09,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T07:35:09,864 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:35:09,865 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:35:09,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742171_1347 (size=165) 2024-12-13T07:35:09,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742171_1347 (size=165) 2024-12-13T07:35:09,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742171_1347 (size=165) 2024-12-13T07:35:09,872 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:35:09,872 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381}, {pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 
0cadc788bc38c378b787524bce4633b8}] 2024-12-13T07:35:09,872 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:09,872 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:09,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T07:35:10,023 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:10,023 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:10,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=146 2024-12-13T07:35:10,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42189 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=145 2024-12-13T07:35:10,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:10,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 
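At this point the master has registered SnapshotProcedure pid=144 for { ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } and fanned it out into one SnapshotRegionProcedure per region. For reference, a client-side call that drives this kind of flow might look roughly like the Java sketch below; the class name and the blocking Admin.snapshot(String, TableName) call are illustrative assumptions based on the public HBase client API, not something taken from this log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        // Connects using whatever hbase-site.xml is on the classpath
        // (assumption: the test cluster in this log is reached the same way).
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Admin.snapshot() blocks until the master-side SnapshotProcedure
          // (SNAPSHOT_PREPARE ... SNAPSHOT_COMPLETE_SNAPSHOT) has finished.
          admin.snapshot("snaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"));
        }
      }
    }

A snapshot TTL of 0, as in this request, means the snapshot never expires, which matches the "resetting it to default value: 0" record earlier in this run.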
2024-12-13T07:35:10,024 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing e7d1f4a6bda4a3ce579351e28dad5381 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-13T07:35:10,024 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing 0cadc788bc38c378b787524bce4633b8 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-13T07:35:10,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/.tmp/cf/544c624b79134b8c8199debd20096467 is 69, key is 05a786dff6332c8c9ba12084b2d56dc87/cf:q/1734075309766/Put/seqid=0 2024-12-13T07:35:10,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/.tmp/cf/65a8ad6172fe48c0b54770785927cde4 is 71, key is 1d020826543e8ab52ffb67f84e8c5999/cf:q/1734075309767/Put/seqid=0 2024-12-13T07:35:10,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742172_1348 (size=5149) 2024-12-13T07:35:10,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742173_1349 (size=8460) 2024-12-13T07:35:10,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742172_1348 (size=5149) 2024-12-13T07:35:10,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742173_1349 (size=8460) 2024-12-13T07:35:10,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742172_1348 (size=5149) 2024-12-13T07:35:10,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742173_1349 (size=8460) 2024-12-13T07:35:10,053 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/.tmp/cf/544c624b79134b8c8199debd20096467 2024-12-13T07:35:10,053 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/.tmp/cf/65a8ad6172fe48c0b54770785927cde4 2024-12-13T07:35:10,057 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/.tmp/cf/65a8ad6172fe48c0b54770785927cde4 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/cf/65a8ad6172fe48c0b54770785927cde4 2024-12-13T07:35:10,057 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/.tmp/cf/544c624b79134b8c8199debd20096467 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/cf/544c624b79134b8c8199debd20096467 2024-12-13T07:35:10,060 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/cf/65a8ad6172fe48c0b54770785927cde4, entries=49, sequenceid=6, filesize=8.3 K 2024-12-13T07:35:10,061 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/cf/544c624b79134b8c8199debd20096467, entries=1, sequenceid=6, filesize=5.0 K 2024-12-13T07:35:10,061 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for e7d1f4a6bda4a3ce579351e28dad5381 in 37ms, sequenceid=6, compaction requested=false 2024-12-13T07:35:10,061 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 0cadc788bc38c378b787524bce4633b8 in 37ms, sequenceid=6, compaction requested=false 2024-12-13T07:35:10,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-13T07:35:10,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for 0cadc788bc38c378b787524bce4633b8: 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for e7d1f4a6bda4a3ce579351e28dad5381: 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(75): 
Snapshotting region testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. for snaptb0-testExportExpiredSnapshot completed. 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. for snaptb0-testExportExpiredSnapshot completed. 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/cf/65a8ad6172fe48c0b54770785927cde4] hfiles 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/cf/544c624b79134b8c8199debd20096467] hfiles 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/cf/65a8ad6172fe48c0b54770785927cde4 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-13T07:35:10,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/cf/544c624b79134b8c8199debd20096467 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-13T07:35:10,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742175_1351 (size=110) 2024-12-13T07:35:10,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742174_1350 (size=110) 2024-12-13T07:35:10,068 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742175_1351 (size=110) 2024-12-13T07:35:10,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742175_1351 (size=110) 2024-12-13T07:35:10,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742174_1350 (size=110) 2024-12-13T07:35:10,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742174_1350 (size=110) 2024-12-13T07:35:10,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 2024-12-13T07:35:10,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:10,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146 2024-12-13T07:35:10,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-13T07:35:10,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=146 2024-12-13T07:35:10,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-13T07:35:10,069 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:10,069 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:10,070 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:10,070 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:10,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=144, state=SUCCESS; SnapshotRegionProcedure 0cadc788bc38c378b787524bce4633b8 in 198 msec 2024-12-13T07:35:10,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-13T07:35:10,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; SnapshotRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381 in 198 msec 2024-12-13T07:35:10,072 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot 
table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:35:10,072 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:35:10,072 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:35:10,072 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-13T07:35:10,073 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-13T07:35:10,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742176_1352 (size=630) 2024-12-13T07:35:10,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742176_1352 (size=630) 2024-12-13T07:35:10,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742176_1352 (size=630) 2024-12-13T07:35:10,082 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:35:10,086 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:35:10,087 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-13T07:35:10,088 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:35:10,088 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-13T07:35:10,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished 
pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 227 msec 2024-12-13T07:35:10,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T07:35:10,168 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 144 completed 2024-12-13T07:35:10,171 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:35:10,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-13T07:35:10,174 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:35:10,174 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:10,174 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 147 2024-12-13T07:35:10,176 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:35:10,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-13T07:35:10,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742177_1353 (size=400) 2024-12-13T07:35:10,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742177_1353 (size=400) 2024-12-13T07:35:10,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742177_1353 (size=400) 2024-12-13T07:35:10,189 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => aa4b6ee25986a85d04a0682e5b7b9bda, NAME => 'testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:10,189 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9e44ea00e032406763f2d24d62083e47, NAME => 'testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:10,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742179_1355 (size=61) 2024-12-13T07:35:10,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742179_1355 (size=61) 2024-12-13T07:35:10,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742179_1355 (size=61) 2024-12-13T07:35:10,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742178_1354 (size=61) 2024-12-13T07:35:10,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742178_1354 (size=61) 2024-12-13T07:35:10,197 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:10,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742178_1354 (size=61) 2024-12-13T07:35:10,197 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing 9e44ea00e032406763f2d24d62083e47, disabling compactions & flushes 2024-12-13T07:35:10,198 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 2024-12-13T07:35:10,198 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 2024-12-13T07:35:10,198 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 
after waiting 0 ms 2024-12-13T07:35:10,198 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 2024-12-13T07:35:10,198 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 2024-12-13T07:35:10,198 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9e44ea00e032406763f2d24d62083e47: 2024-12-13T07:35:10,198 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:10,198 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing aa4b6ee25986a85d04a0682e5b7b9bda, disabling compactions & flushes 2024-12-13T07:35:10,198 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 2024-12-13T07:35:10,198 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 2024-12-13T07:35:10,198 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. after waiting 0 ms 2024-12-13T07:35:10,198 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 2024-12-13T07:35:10,198 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 2024-12-13T07:35:10,198 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for aa4b6ee25986a85d04a0682e5b7b9bda: 2024-12-13T07:35:10,199 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:35:10,199 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734075310199"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075310199"}]},"ts":"1734075310199"} 2024-12-13T07:35:10,199 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734075310199"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075310199"}]},"ts":"1734075310199"} 2024-12-13T07:35:10,201 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
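The records above trace CreateTableProcedure pid=147 for 'testExportExpiredSnapshot' through CREATE_TABLE_PRE_OPERATION, CREATE_TABLE_WRITE_FS_LAYOUT and CREATE_TABLE_ADD_TO_META, creating two regions split at row key '1'. A rough client-side equivalent, assuming the standard TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API (the class name below is hypothetical):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSplitTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                      .setMaxVersions(1)   // VERSIONS => '1', as in the descriptor above
                      .build());
          // One split key "1" yields the two regions seen in the log:
          // [ '', '1' ) and [ '1', '' ).
          admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
        }
      }
    }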
2024-12-13T07:35:10,202 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:35:10,202 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075310202"}]},"ts":"1734075310202"} 2024-12-13T07:35:10,203 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-13T07:35:10,257 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:35:10,260 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:35:10,260 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:35:10,260 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:35:10,260 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:35:10,260 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:35:10,260 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:35:10,260 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:35:10,261 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9e44ea00e032406763f2d24d62083e47, ASSIGN}, {pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=aa4b6ee25986a85d04a0682e5b7b9bda, ASSIGN}] 2024-12-13T07:35:10,263 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9e44ea00e032406763f2d24d62083e47, ASSIGN 2024-12-13T07:35:10,263 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=aa4b6ee25986a85d04a0682e5b7b9bda, ASSIGN 2024-12-13T07:35:10,264 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9e44ea00e032406763f2d24d62083e47, ASSIGN; state=OFFLINE, location=9eb75de94ef3,42189,1734075182526; forceNewPlan=false, retain=false 2024-12-13T07:35:10,264 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=aa4b6ee25986a85d04a0682e5b7b9bda, ASSIGN; state=OFFLINE, location=9eb75de94ef3,41895,1734075182430; forceNewPlan=false, retain=false 2024-12-13T07:35:10,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=147 2024-12-13T07:35:10,414 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:35:10,415 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=9e44ea00e032406763f2d24d62083e47, regionState=OPENING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:10,415 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=aa4b6ee25986a85d04a0682e5b7b9bda, regionState=OPENING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:10,417 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; OpenRegionProcedure aa4b6ee25986a85d04a0682e5b7b9bda, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:35:10,418 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=148, state=RUNNABLE; OpenRegionProcedure 9e44ea00e032406763f2d24d62083e47, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:35:10,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-13T07:35:10,570 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:10,572 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:10,577 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 2024-12-13T07:35:10,578 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => aa4b6ee25986a85d04a0682e5b7b9bda, NAME => 'testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T07:35:10,578 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 2024-12-13T07:35:10,578 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. service=AccessControlService 2024-12-13T07:35:10,578 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 9e44ea00e032406763f2d24d62083e47, NAME => 'testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T07:35:10,579 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:35:10,579 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 
service=AccessControlService 2024-12-13T07:35:10,579 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:35:10,579 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:35:10,579 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:10,579 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:35:10,579 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 9e44ea00e032406763f2d24d62083e47 2024-12-13T07:35:10,579 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:35:10,579 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:10,579 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 9e44ea00e032406763f2d24d62083e47 2024-12-13T07:35:10,580 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 9e44ea00e032406763f2d24d62083e47 2024-12-13T07:35:10,581 INFO [StoreOpener-aa4b6ee25986a85d04a0682e5b7b9bda-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:35:10,581 INFO [StoreOpener-9e44ea00e032406763f2d24d62083e47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9e44ea00e032406763f2d24d62083e47 2024-12-13T07:35:10,582 INFO [StoreOpener-aa4b6ee25986a85d04a0682e5b7b9bda-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction 
policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aa4b6ee25986a85d04a0682e5b7b9bda columnFamilyName cf 2024-12-13T07:35:10,582 DEBUG [StoreOpener-aa4b6ee25986a85d04a0682e5b7b9bda-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:10,582 INFO [StoreOpener-9e44ea00e032406763f2d24d62083e47-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e44ea00e032406763f2d24d62083e47 columnFamilyName cf 2024-12-13T07:35:10,582 DEBUG [StoreOpener-9e44ea00e032406763f2d24d62083e47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:10,583 INFO [StoreOpener-aa4b6ee25986a85d04a0682e5b7b9bda-1 {}] regionserver.HStore(327): Store=aa4b6ee25986a85d04a0682e5b7b9bda/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:35:10,583 INFO [StoreOpener-9e44ea00e032406763f2d24d62083e47-1 {}] regionserver.HStore(327): Store=9e44ea00e032406763f2d24d62083e47/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:35:10,583 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/9e44ea00e032406763f2d24d62083e47 2024-12-13T07:35:10,583 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:35:10,584 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:35:10,584 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/9e44ea00e032406763f2d24d62083e47 2024-12-13T07:35:10,585 DEBUG 
[RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 9e44ea00e032406763f2d24d62083e47 2024-12-13T07:35:10,585 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1085): writing seq id for aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:35:10,587 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/aa4b6ee25986a85d04a0682e5b7b9bda/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:35:10,587 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/9e44ea00e032406763f2d24d62083e47/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:35:10,587 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened aa4b6ee25986a85d04a0682e5b7b9bda; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72264489, jitterRate=0.07682479918003082}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:35:10,587 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 9e44ea00e032406763f2d24d62083e47; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75413257, jitterRate=0.12374509871006012}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:35:10,588 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 9e44ea00e032406763f2d24d62083e47: 2024-12-13T07:35:10,588 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for aa4b6ee25986a85d04a0682e5b7b9bda: 2024-12-13T07:35:10,589 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda., pid=150, masterSystemTime=1734075310570 2024-12-13T07:35:10,589 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47., pid=151, masterSystemTime=1734075310572 2024-12-13T07:35:10,590 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 2024-12-13T07:35:10,590 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 
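Both regions have now been opened (recovered-edits check, 1.seqid written, post-open deploy tasks) on their assigned region servers. A test or operator wanting to confirm the assignments that the following RegionStateStore updates record could do something along these lines; the helper class is hypothetical and the calls are the usual RegionLocator ones:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class PrintRegionLocations {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testExportExpiredSnapshot"))) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            // Each opened region reports the server hosting it, e.g.
            // 9eb75de94ef3,42189,... and 9eb75de94ef3,41895,... in this run.
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }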
2024-12-13T07:35:10,590 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=9e44ea00e032406763f2d24d62083e47, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:10,590 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 2024-12-13T07:35:10,591 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 2024-12-13T07:35:10,591 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=aa4b6ee25986a85d04a0682e5b7b9bda, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:10,593 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=148 2024-12-13T07:35:10,593 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=148, state=SUCCESS; OpenRegionProcedure 9e44ea00e032406763f2d24d62083e47, server=9eb75de94ef3,42189,1734075182526 in 174 msec 2024-12-13T07:35:10,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-13T07:35:10,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; OpenRegionProcedure aa4b6ee25986a85d04a0682e5b7b9bda, server=9eb75de94ef3,41895,1734075182430 in 175 msec 2024-12-13T07:35:10,594 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9e44ea00e032406763f2d24d62083e47, ASSIGN in 332 msec 2024-12-13T07:35:10,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=147 2024-12-13T07:35:10,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=aa4b6ee25986a85d04a0682e5b7b9bda, ASSIGN in 333 msec 2024-12-13T07:35:10,595 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:35:10,595 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075310595"}]},"ts":"1734075310595"} 2024-12-13T07:35:10,597 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-13T07:35:10,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-13T07:35:10,786 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:35:10,786 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-13T07:35:10,788 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-13T07:35:11,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:11,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:11,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:11,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:11,030 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-13T07:35:11,163 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:11,163 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:11,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:11,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:11,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:11,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:11,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:11,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:11,164 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=testExportExpiredSnapshot in 992 msec 2024-12-13T07:35:11,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-13T07:35:11,282 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportExpiredSnapshot, procId: 147 completed 2024-12-13T07:35:11,282 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-13T07:35:11,282 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:11,287 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-13T07:35:11,287 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:11,287 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportExpiredSnapshot assigned. 2024-12-13T07:35:11,296 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42189 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:35:11,297 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41895 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:35:11,300 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportExpiredSnapshot 2024-12-13T07:35:11,300 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 
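The "writing data to region ... with WAL disabled" warnings above come from puts issued with write-ahead logging turned off. A minimal sketch of such a write, assuming the usual Put/Durability client API (the row key, qualifier and value below are made up for illustration):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-1"))   // hypothetical row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL is what triggers the "with WAL disabled. Data may be lost
          // in the event of a crash." warning; it trades durability for write speed.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }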
2024-12-13T07:35:11,300 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:11,308 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-13T07:35:11,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-13T07:35:11,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:35:11,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4dd871f6 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dd38887 2024-12-13T07:35:11,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42a3a824, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:11,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:11,494 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38322, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:11,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4dd871f6 to 127.0.0.1:59810 2024-12-13T07:35:11,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:11,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a3000aa to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@463d103e 2024-12-13T07:35:11,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38b02e2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:11,590 DEBUG [hconnection-0x768607c8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:11,591 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:11,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:11,595 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45912, version=2.7.0-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:11,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a3000aa to 127.0.0.1:59810 2024-12-13T07:35:11,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:11,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-13T07:35:11,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:35:11,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-13T07:35:11,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-13T07:35:11,599 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:35:11,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T07:35:11,600 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:35:11,603 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:35:11,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742180_1356 (size=152) 2024-12-13T07:35:11,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742180_1356 (size=152) 2024-12-13T07:35:11,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742180_1356 (size=152) 2024-12-13T07:35:11,613 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:35:11,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, 
ppid=152, state=RUNNABLE; SnapshotRegionProcedure 9e44ea00e032406763f2d24d62083e47}, {pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure aa4b6ee25986a85d04a0682e5b7b9bda}] 2024-12-13T07:35:11,614 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 9e44ea00e032406763f2d24d62083e47 2024-12-13T07:35:11,614 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:35:11,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T07:35:11,765 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:11,765 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:11,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=154 2024-12-13T07:35:11,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42189 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=153 2024-12-13T07:35:11,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 2024-12-13T07:35:11,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 
2024-12-13T07:35:11,767 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing 9e44ea00e032406763f2d24d62083e47 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-13T07:35:11,768 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing aa4b6ee25986a85d04a0682e5b7b9bda 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-13T07:35:11,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/9e44ea00e032406763f2d24d62083e47/.tmp/cf/2554e3c20dd345dc92a6cdd0a692a543 is 71, key is 095359f1899a4926ad6259ef92528368/cf:q/1734075311296/Put/seqid=0 2024-12-13T07:35:11,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/aa4b6ee25986a85d04a0682e5b7b9bda/.tmp/cf/e060dbfae4234c7a809774c8053b6990 is 71, key is 11be6142481d39b45d3afb83887d8223/cf:q/1734075311297/Put/seqid=0 2024-12-13T07:35:11,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742181_1357 (size=5356) 2024-12-13T07:35:11,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742181_1357 (size=5356) 2024-12-13T07:35:11,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742181_1357 (size=5356) 2024-12-13T07:35:11,792 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/9e44ea00e032406763f2d24d62083e47/.tmp/cf/2554e3c20dd345dc92a6cdd0a692a543 2024-12-13T07:35:11,797 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/9e44ea00e032406763f2d24d62083e47/.tmp/cf/2554e3c20dd345dc92a6cdd0a692a543 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/9e44ea00e032406763f2d24d62083e47/cf/2554e3c20dd345dc92a6cdd0a692a543 2024-12-13T07:35:11,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742182_1358 (size=8256) 2024-12-13T07:35:11,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742182_1358 (size=8256) 2024-12-13T07:35:11,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742182_1358 (size=8256) 2024-12-13T07:35:11,800 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/aa4b6ee25986a85d04a0682e5b7b9bda/.tmp/cf/e060dbfae4234c7a809774c8053b6990 2024-12-13T07:35:11,802 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/9e44ea00e032406763f2d24d62083e47/cf/2554e3c20dd345dc92a6cdd0a692a543, entries=4, sequenceid=5, filesize=5.2 K 2024-12-13T07:35:11,803 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 9e44ea00e032406763f2d24d62083e47 in 36ms, sequenceid=5, compaction requested=false 2024-12-13T07:35:11,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for 9e44ea00e032406763f2d24d62083e47: 2024-12-13T07:35:11,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. for snapshot-testExportExpiredSnapshot completed. 2024-12-13T07:35:11,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-13T07:35:11,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:11,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/9e44ea00e032406763f2d24d62083e47/cf/2554e3c20dd345dc92a6cdd0a692a543] hfiles 2024-12-13T07:35:11,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/9e44ea00e032406763f2d24d62083e47/cf/2554e3c20dd345dc92a6cdd0a692a543 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-13T07:35:11,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/aa4b6ee25986a85d04a0682e5b7b9bda/.tmp/cf/e060dbfae4234c7a809774c8053b6990 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/aa4b6ee25986a85d04a0682e5b7b9bda/cf/e060dbfae4234c7a809774c8053b6990 2024-12-13T07:35:11,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742183_1359 (size=103) 2024-12-13T07:35:11,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742183_1359 (size=103) 2024-12-13T07:35:11,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742183_1359 (size=103) 2024-12-13T07:35:11,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 
2024-12-13T07:35:11,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-13T07:35:11,810 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/aa4b6ee25986a85d04a0682e5b7b9bda/cf/e060dbfae4234c7a809774c8053b6990, entries=46, sequenceid=5, filesize=8.1 K 2024-12-13T07:35:11,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-13T07:35:11,810 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 9e44ea00e032406763f2d24d62083e47 2024-12-13T07:35:11,810 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 9e44ea00e032406763f2d24d62083e47 2024-12-13T07:35:11,811 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for aa4b6ee25986a85d04a0682e5b7b9bda in 44ms, sequenceid=5, compaction requested=false 2024-12-13T07:35:11,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for aa4b6ee25986a85d04a0682e5b7b9bda: 2024-12-13T07:35:11,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. for snapshot-testExportExpiredSnapshot completed. 2024-12-13T07:35:11,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-13T07:35:11,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:11,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/aa4b6ee25986a85d04a0682e5b7b9bda/cf/e060dbfae4234c7a809774c8053b6990] hfiles 2024-12-13T07:35:11,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/aa4b6ee25986a85d04a0682e5b7b9bda/cf/e060dbfae4234c7a809774c8053b6990 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-13T07:35:11,812 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; SnapshotRegionProcedure 9e44ea00e032406763f2d24d62083e47 in 198 msec 2024-12-13T07:35:11,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742184_1360 (size=103) 2024-12-13T07:35:11,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742184_1360 (size=103) 2024-12-13T07:35:11,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742184_1360 (size=103) 2024-12-13T07:35:11,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 
2024-12-13T07:35:11,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-13T07:35:11,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-13T07:35:11,817 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:35:11,817 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:35:11,819 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=152 2024-12-13T07:35:11,819 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; SnapshotRegionProcedure aa4b6ee25986a85d04a0682e5b7b9bda in 205 msec 2024-12-13T07:35:11,819 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:35:11,819 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:35:11,820 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:35:11,820 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-13T07:35:11,820 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-13T07:35:11,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742185_1361 (size=609) 2024-12-13T07:35:11,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742185_1361 (size=609) 2024-12-13T07:35:11,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742185_1361 (size=609) 2024-12-13T07:35:11,828 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:35:11,832 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): 
pid=152, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:35:11,833 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-13T07:35:11,834 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:35:11,834 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-13T07:35:11,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 237 msec 2024-12-13T07:35:11,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T07:35:11,902 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot, procId: 152 completed 2024-12-13T07:35:12,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-13T07:35:12,021 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-13T07:35:12,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-13T07:35:12,022 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-13T07:35:12,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T07:35:12,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T07:35:12,663 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0006_000001 (auth:SIMPLE) from 127.0.0.1:43198 2024-12-13T07:35:12,673 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_2/usercache/jenkins/appcache/application_1734075189398_0006/container_1734075189398_0006_01_000001/launch_container.sh] 2024-12-13T07:35:12,673 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_2/usercache/jenkins/appcache/application_1734075189398_0006/container_1734075189398_0006_01_000001/container_tokens] 2024-12-13T07:35:12,674 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_2/usercache/jenkins/appcache/application_1734075189398_0006/container_1734075189398_0006_01_000001/sysfs] 2024-12-13T07:35:13,608 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:35:21,909 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075321909 2024-12-13T07:35:21,909 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41477, tgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075321909, rawTgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075321909, srcFsUri=hdfs://localhost:41477, srcDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:21,936 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41477, inputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:21,936 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075321909, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075321909/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-13T07:35:21,939 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T07:35:21,940 ERROR [Time-limited test {}] util.AbstractHBaseTool(153): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:948) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1093) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:315) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
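[Editorial sketch] The SnapshotTTLExpiredException above is the expected outcome of testExportExpiredSnapshot: the snapshot was requested at 07:35:11 with ttl=10 (seconds), the test waited past that TTL, and the ExportSnapshot run at 07:35:21 was rejected during the "Verify the source snapshot's expiration status and integrity" step before any data was copied. A rough client-side sketch of the two steps is below; it assumes the SnapshotDescription constructor that accepts a snapshot-properties map (the form added alongside snapshot TTL support) and a "TTL" property measured in seconds, and it uses a placeholder export destination rather than the paths from the log.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExpiredSnapshotExportSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testExportExpiredSnapshot");

          // FLUSH-type snapshot with a short TTL, mirroring the logged request
          // { ss=snapshot-testExportExpiredSnapshot ... type=FLUSH ttl=10 }.
          // Assumption: the "TTL" snapshot property is interpreted in seconds.
          Map<String, Object> props = new HashMap<>();
          props.put("TTL", 10L);
          admin.snapshot(new SnapshotDescription(
              "snapshot-testExportExpiredSnapshot", tn, SnapshotType.FLUSH, props));

          // Let the TTL lapse, roughly the ten-second gap between 07:35:11 and 07:35:21.
          Thread.sleep(11_000L);

          // ExportSnapshot is a Hadoop Tool; -snapshot and -copy-to are its standard options.
          int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
              "-snapshot", "snapshot-testExportExpiredSnapshot",
              "-copy-to", "hdfs://namenode:8020/export-test"   // placeholder destination
          });
          // rc is non-zero here: verifySnapshot(...) raises SnapshotTTLExpiredException
          // before anything is copied, which is exactly the stack trace above.
          System.out.println("ExportSnapshot exit code: " + rc);
        }
      }
    }

Outside of tests the same tool is normally driven from the command line, e.g. hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <hdfs-uri>, and the TTL check applies there in the same way.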
2024-12-13T07:35:21,941 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportExpiredSnapshot 2024-12-13T07:35:21,941 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-13T07:35:21,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T07:35:21,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-13T07:35:21,943 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075321943"}]},"ts":"1734075321943"} 2024-12-13T07:35:21,944 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-13T07:35:22,023 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-13T07:35:22,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-13T07:35:22,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e7d1f4a6bda4a3ce579351e28dad5381, UNASSIGN}, {pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0cadc788bc38c378b787524bce4633b8, UNASSIGN}] 2024-12-13T07:35:22,030 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0cadc788bc38c378b787524bce4633b8, UNASSIGN 2024-12-13T07:35:22,030 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e7d1f4a6bda4a3ce579351e28dad5381, UNASSIGN 2024-12-13T07:35:22,031 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=e7d1f4a6bda4a3ce579351e28dad5381, regionState=CLOSING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:22,031 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=0cadc788bc38c378b787524bce4633b8, regionState=CLOSING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:22,033 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:35:22,033 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=157, state=RUNNABLE; CloseRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:35:22,034 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: 
false 2024-12-13T07:35:22,034 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=158, state=RUNNABLE; CloseRegionProcedure 0cadc788bc38c378b787524bce4633b8, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:35:22,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-13T07:35:22,185 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:22,186 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:22,186 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:22,187 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:35:22,187 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing e7d1f4a6bda4a3ce579351e28dad5381, disabling compactions & flushes 2024-12-13T07:35:22,187 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:22,187 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(124): Close 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:22,187 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:22,187 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. after waiting 0 ms 2024-12-13T07:35:22,187 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:22,187 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:35:22,188 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1681): Closing 0cadc788bc38c378b787524bce4633b8, disabling compactions & flushes 2024-12-13T07:35:22,188 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 2024-12-13T07:35:22,188 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 
2024-12-13T07:35:22,188 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. after waiting 0 ms 2024-12-13T07:35:22,188 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 2024-12-13T07:35:22,197 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:35:22,197 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:35:22,198 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:35:22,198 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:35:22,198 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381. 2024-12-13T07:35:22,198 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for e7d1f4a6bda4a3ce579351e28dad5381: 2024-12-13T07:35:22,198 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8. 
2024-12-13T07:35:22,198 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1635): Region close journal for 0cadc788bc38c378b787524bce4633b8: 2024-12-13T07:35:22,199 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(170): Closed 0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:22,200 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=0cadc788bc38c378b787524bce4633b8, regionState=CLOSED 2024-12-13T07:35:22,200 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:22,200 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=e7d1f4a6bda4a3ce579351e28dad5381, regionState=CLOSED 2024-12-13T07:35:22,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=158 2024-12-13T07:35:22,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=158, state=SUCCESS; CloseRegionProcedure 0cadc788bc38c378b787524bce4633b8, server=9eb75de94ef3,36021,1734075182342 in 167 msec 2024-12-13T07:35:22,203 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=157 2024-12-13T07:35:22,203 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=157, state=SUCCESS; CloseRegionProcedure e7d1f4a6bda4a3ce579351e28dad5381, server=9eb75de94ef3,42189,1734075182526 in 169 msec 2024-12-13T07:35:22,204 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0cadc788bc38c378b787524bce4633b8, UNASSIGN in 175 msec 2024-12-13T07:35:22,205 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-13T07:35:22,205 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e7d1f4a6bda4a3ce579351e28dad5381, UNASSIGN in 175 msec 2024-12-13T07:35:22,206 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-13T07:35:22,206 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 180 msec 2024-12-13T07:35:22,207 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075322207"}]},"ts":"1734075322207"} 2024-12-13T07:35:22,208 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-13T07:35:22,218 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-13T07:35:22,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 278 msec 2024-12-13T07:35:22,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 
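[Editorial sketch] With the DISABLE procedure (pid=155) finished above, the log moves on to deleting the table (pid=161) and then dropping the three snapshots left over from the test. A minimal sketch of the corresponding client calls follows; the names are taken from the log, the surrounding setup is the same boilerplate as in the earlier sketches, and the comments paraphrase what the subsequent log entries show the master doing.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
          admin.disableTable(tn);   // DisableTableProcedure, pid=155 above
          admin.deleteTable(tn);    // DeleteTableProcedure, pid=161 below: regions archived, meta rows and ACLs removed
          // Snapshot cleanup, matching the three "delete name: ..." requests near the end of the test.
          admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
          admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
          admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
        }
      }
    }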
2024-12-13T07:35:22,245 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 155 completed 2024-12-13T07:35:22,246 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,247 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,248 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,248 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,250 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:22,250 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:22,252 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/recovered.edits] 2024-12-13T07:35:22,252 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/recovered.edits] 2024-12-13T07:35:22,257 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/cf/544c624b79134b8c8199debd20096467 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/cf/544c624b79134b8c8199debd20096467 2024-12-13T07:35:22,257 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/cf/65a8ad6172fe48c0b54770785927cde4 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/cf/65a8ad6172fe48c0b54770785927cde4 2024-12-13T07:35:22,260 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8/recovered.edits/9.seqid 2024-12-13T07:35:22,260 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381/recovered.edits/9.seqid 2024-12-13T07:35:22,261 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/0cadc788bc38c378b787524bce4633b8 2024-12-13T07:35:22,261 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportExpiredSnapshot/e7d1f4a6bda4a3ce579351e28dad5381 2024-12-13T07:35:22,261 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-13T07:35:22,263 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,265 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportExpiredSnapshot with data PBUF 2024-12-13T07:35:22,265 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-13T07:35:22,265 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-13T07:35:22,265 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-13T07:35:22,266 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-13T07:35:22,267 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-13T07:35:22,268 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,268 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-13T07:35:22,269 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075322269"}]},"ts":"9223372036854775807"} 2024-12-13T07:35:22,269 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075322269"}]},"ts":"9223372036854775807"} 2024-12-13T07:35:22,270 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T07:35:22,270 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e7d1f4a6bda4a3ce579351e28dad5381, NAME => 'testtb-testExportExpiredSnapshot,,1734075308797.e7d1f4a6bda4a3ce579351e28dad5381.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 0cadc788bc38c378b787524bce4633b8, NAME => 'testtb-testExportExpiredSnapshot,1,1734075308797.0cadc788bc38c378b787524bce4633b8.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T07:35:22,271 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
2024-12-13T07:35:22,271 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734075322271"}]},"ts":"9223372036854775807"} 2024-12-13T07:35:22,272 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-13T07:35:22,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:22,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:22,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:22,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:22,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-13T07:35:22,281 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:22,281 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:22,281 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot 
\x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:22,282 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:22,282 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T07:35:22,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 36 msec 2024-12-13T07:35:22,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-13T07:35:22,376 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 161 completed 2024-12-13T07:35:22,393 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" 2024-12-13T07:35:22,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-13T07:35:22,396 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" 2024-12-13T07:35:22,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-13T07:35:22,399 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" 2024-12-13T07:35:22,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-13T07:35:22,419 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=795 (was 805), OpenFileDescriptor=799 (was 812), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=417 (was 475), ProcessCount=11 (was 20), AvailableMemoryMB=7189 (was 6227) - AvailableMemoryMB LEAK? 
- 2024-12-13T07:35:22,419 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-13T07:35:22,433 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=795, OpenFileDescriptor=799, MaxFileDescriptor=1048576, SystemLoadAverage=417, ProcessCount=11, AvailableMemoryMB=7188 2024-12-13T07:35:22,433 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-13T07:35:22,434 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:35:22,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T07:35:22,436 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:35:22,436 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:22,436 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 162 2024-12-13T07:35:22,436 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:35:22,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-13T07:35:22,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742186_1362 (size=412) 2024-12-13T07:35:22,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742186_1362 (size=412) 2024-12-13T07:35:22,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742186_1362 (size=412) 2024-12-13T07:35:22,443 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4c7a924e1bb62722de6c48ffcadf7b19, NAME => 'testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:22,443 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 6572c03e43180e068903f7f56608b8da, NAME => 'testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:22,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742188_1364 (size=73) 2024-12-13T07:35:22,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742188_1364 (size=73) 2024-12-13T07:35:22,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742188_1364 (size=73) 2024-12-13T07:35:22,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742187_1363 (size=73) 2024-12-13T07:35:22,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742187_1363 (size=73) 2024-12-13T07:35:22,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742187_1363 (size=73) 2024-12-13T07:35:22,453 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:22,453 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing 6572c03e43180e068903f7f56608b8da, disabling compactions & flushes 2024-12-13T07:35:22,453 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:22,453 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:22,453 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 
after waiting 0 ms 2024-12-13T07:35:22,453 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:22,454 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:22,454 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for 6572c03e43180e068903f7f56608b8da: 2024-12-13T07:35:22,454 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:22,454 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 4c7a924e1bb62722de6c48ffcadf7b19, disabling compactions & flushes 2024-12-13T07:35:22,454 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 2024-12-13T07:35:22,454 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 2024-12-13T07:35:22,454 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. after waiting 0 ms 2024-12-13T07:35:22,454 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 2024-12-13T07:35:22,454 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 
2024-12-13T07:35:22,454 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4c7a924e1bb62722de6c48ffcadf7b19: 2024-12-13T07:35:22,455 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:35:22,455 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734075322455"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075322455"}]},"ts":"1734075322455"} 2024-12-13T07:35:22,455 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734075322455"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075322455"}]},"ts":"1734075322455"} 2024-12-13T07:35:22,457 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T07:35:22,457 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:35:22,458 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075322457"}]},"ts":"1734075322457"} 2024-12-13T07:35:22,459 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-13T07:35:22,477 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:35:22,478 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:35:22,478 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:35:22,478 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:35:22,478 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:35:22,478 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:35:22,478 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:35:22,478 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:35:22,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4c7a924e1bb62722de6c48ffcadf7b19, ASSIGN}, {pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6572c03e43180e068903f7f56608b8da, ASSIGN}] 2024-12-13T07:35:22,479 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=162, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6572c03e43180e068903f7f56608b8da, ASSIGN 2024-12-13T07:35:22,479 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4c7a924e1bb62722de6c48ffcadf7b19, ASSIGN 2024-12-13T07:35:22,480 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6572c03e43180e068903f7f56608b8da, ASSIGN; state=OFFLINE, location=9eb75de94ef3,41895,1734075182430; forceNewPlan=false, retain=false 2024-12-13T07:35:22,480 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4c7a924e1bb62722de6c48ffcadf7b19, ASSIGN; state=OFFLINE, location=9eb75de94ef3,36021,1734075182342; forceNewPlan=false, retain=false 2024-12-13T07:35:22,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-13T07:35:22,630 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:35:22,631 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=6572c03e43180e068903f7f56608b8da, regionState=OPENING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:22,631 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=4c7a924e1bb62722de6c48ffcadf7b19, regionState=OPENING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:22,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=163, state=RUNNABLE; OpenRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:35:22,638 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE; OpenRegionProcedure 6572c03e43180e068903f7f56608b8da, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:35:22,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-13T07:35:22,790 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:22,791 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:23,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-13T07:35:23,104 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 
2024-12-13T07:35:23,105 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:23,105 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => 4c7a924e1bb62722de6c48ffcadf7b19, NAME => 'testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T07:35:23,105 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => 6572c03e43180e068903f7f56608b8da, NAME => 'testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T07:35:23,105 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. service=AccessControlService 2024-12-13T07:35:23,106 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. service=AccessControlService 2024-12-13T07:35:23,106 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T07:35:23,106 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:23,106 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:23,106 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T07:35:23,107 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:23,107 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:23,107 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:23,107 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:23,107 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:23,107 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:23,109 INFO [StoreOpener-4c7a924e1bb62722de6c48ffcadf7b19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:23,109 INFO [StoreOpener-6572c03e43180e068903f7f56608b8da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:23,110 INFO [StoreOpener-4c7a924e1bb62722de6c48ffcadf7b19-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c7a924e1bb62722de6c48ffcadf7b19 columnFamilyName cf 2024-12-13T07:35:23,110 INFO [StoreOpener-6572c03e43180e068903f7f56608b8da-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6572c03e43180e068903f7f56608b8da columnFamilyName cf 2024-12-13T07:35:23,110 DEBUG [StoreOpener-4c7a924e1bb62722de6c48ffcadf7b19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:23,110 DEBUG [StoreOpener-6572c03e43180e068903f7f56608b8da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:23,111 INFO [StoreOpener-4c7a924e1bb62722de6c48ffcadf7b19-1 {}] regionserver.HStore(327): Store=4c7a924e1bb62722de6c48ffcadf7b19/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:35:23,111 INFO [StoreOpener-6572c03e43180e068903f7f56608b8da-1 {}] regionserver.HStore(327): Store=6572c03e43180e068903f7f56608b8da/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:35:23,112 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:23,112 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:23,112 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:23,112 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:23,113 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:23,113 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:23,115 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:35:23,115 DEBUG 
[RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:35:23,115 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened 4c7a924e1bb62722de6c48ffcadf7b19; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66862338, jitterRate=-0.0036735236644744873}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:35:23,115 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened 6572c03e43180e068903f7f56608b8da; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64730645, jitterRate=-0.03543822467327118}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:35:23,116 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for 6572c03e43180e068903f7f56608b8da: 2024-12-13T07:35:23,116 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for 4c7a924e1bb62722de6c48ffcadf7b19: 2024-12-13T07:35:23,116 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19., pid=165, masterSystemTime=1734075322790 2024-12-13T07:35:23,116 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da., pid=166, masterSystemTime=1734075322791 2024-12-13T07:35:23,117 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:23,117 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:23,117 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=6572c03e43180e068903f7f56608b8da, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:23,117 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 2024-12-13T07:35:23,118 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 
2024-12-13T07:35:23,118 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=4c7a924e1bb62722de6c48ffcadf7b19, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:23,119 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=164 2024-12-13T07:35:23,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=164, state=SUCCESS; OpenRegionProcedure 6572c03e43180e068903f7f56608b8da, server=9eb75de94ef3,41895,1734075182430 in 481 msec 2024-12-13T07:35:23,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=163 2024-12-13T07:35:23,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=163, state=SUCCESS; OpenRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19, server=9eb75de94ef3,36021,1734075182342 in 484 msec 2024-12-13T07:35:23,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6572c03e43180e068903f7f56608b8da, ASSIGN in 642 msec 2024-12-13T07:35:23,121 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-12-13T07:35:23,121 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4c7a924e1bb62722de6c48ffcadf7b19, ASSIGN in 642 msec 2024-12-13T07:35:23,121 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:35:23,121 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075323121"}]},"ts":"1734075323121"} 2024-12-13T07:35:23,122 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-13T07:35:23,190 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:35:23,190 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-13T07:35:23,194 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-13T07:35:23,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:23,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:23,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:23,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:23,501 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:23,501 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:23,501 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:23,501 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:23,502 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:23,502 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:23,502 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:23,502 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:23,503 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 1.0650 sec 2024-12-13T07:35:23,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-13T07:35:23,544 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 162 completed 2024-12-13T07:35:23,544 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. 
Timeout = 60000ms 2024-12-13T07:35:23,544 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:23,548 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-13T07:35:23,549 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:23,549 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-13T07:35:23,552 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T07:35:23,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075323552 (current time:1734075323552). 2024-12-13T07:35:23,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:35:23,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-13T07:35:23,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:35:23,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38ba2a16 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@129fdfd8 2024-12-13T07:35:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76ed0bf0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:23,567 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33840, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38ba2a16 to 127.0.0.1:59810 2024-12-13T07:35:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:23,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b015217 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@570c3bb0 2024-12-13T07:35:23,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4920182a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:23,587 DEBUG [hconnection-0xa27a525-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:23,588 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:23,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:23,590 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:23,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b015217 to 127.0.0.1:59810 2024-12-13T07:35:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-13T07:35:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:35:23,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T07:35:23,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-13T07:35:23,592 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:35:23,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T07:35:23,593 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:35:23,595 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:35:23,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742189_1365 (size=185) 2024-12-13T07:35:23,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742189_1365 (size=185) 2024-12-13T07:35:23,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742189_1365 (size=185) 2024-12-13T07:35:23,600 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:35:23,600 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19}, {pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 6572c03e43180e068903f7f56608b8da}] 2024-12-13T07:35:23,601 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:23,601 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:23,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T07:35:23,752 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:23,752 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:23,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=169 2024-12-13T07:35:23,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=168 2024-12-13T07:35:23,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:23,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 
2024-12-13T07:35:23,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 4c7a924e1bb62722de6c48ffcadf7b19: 2024-12-13T07:35:23,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.HRegion(2538): Flush status journal for 6572c03e43180e068903f7f56608b8da: 2024-12-13T07:35:23,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-13T07:35:23,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-13T07:35:23,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:23,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:23,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:23,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:23,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:35:23,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:35:23,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742190_1366 (size=76) 2024-12-13T07:35:23,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742190_1366 (size=76) 2024-12-13T07:35:23,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742190_1366 (size=76) 2024-12-13T07:35:23,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742191_1367 (size=76) 2024-12-13T07:35:23,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742191_1367 (size=76) 2024-12-13T07:35:23,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] 
regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 2024-12-13T07:35:23,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742191_1367 (size=76) 2024-12-13T07:35:23,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-13T07:35:23,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-13T07:35:23,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:23,773 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:23,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=169 2024-12-13T07:35:23,774 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:23,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=169 2024-12-13T07:35:23,774 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:23,774 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:23,775 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; SnapshotRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19 in 174 msec 2024-12-13T07:35:23,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=167 2024-12-13T07:35:23,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=167, state=SUCCESS; SnapshotRegionProcedure 6572c03e43180e068903f7f56608b8da in 174 msec 2024-12-13T07:35:23,775 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:35:23,776 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:35:23,776 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:35:23,776 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:23,777 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:23,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742192_1368 (size=567) 2024-12-13T07:35:23,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742192_1368 (size=567) 2024-12-13T07:35:23,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742192_1368 (size=567) 2024-12-13T07:35:23,785 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:35:23,789 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:35:23,789 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:23,790 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:35:23,790 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-13T07:35:23,791 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 199 msec 2024-12-13T07:35:23,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 
2024-12-13T07:35:23,896 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 167 completed 2024-12-13T07:35:23,908 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:35:23,909 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41895 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:35:23,915 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-13T07:35:23,915 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 2024-12-13T07:35:23,915 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:23,926 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T07:35:23,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075323926 (current time:1734075323926). 2024-12-13T07:35:23,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:35:23,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-13T07:35:23,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:35:23,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x44c80a39 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@73f18108 2024-12-13T07:35:23,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@768f4bd5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:23,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:23,937 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:23,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x44c80a39 to 127.0.0.1:59810 
2024-12-13T07:35:23,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:23,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a6d7ccc to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@56b5b48 2024-12-13T07:35:23,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@93669d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:23,959 DEBUG [hconnection-0x767e3f6c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:23,960 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33872, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:23,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:23,963 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55324, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:23,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a6d7ccc to 127.0.0.1:59810 2024-12-13T07:35:23,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:23,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-13T07:35:23,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-13T07:35:23,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=170, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T07:35:23,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-13T07:35:23,967 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:35:23,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-13T07:35:23,968 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:35:23,971 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:35:23,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742193_1369 (size=180) 2024-12-13T07:35:23,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742193_1369 (size=180) 2024-12-13T07:35:23,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742193_1369 (size=180) 2024-12-13T07:35:23,982 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:35:23,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19}, {pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 6572c03e43180e068903f7f56608b8da}] 2024-12-13T07:35:23,982 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:23,982 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:24,070 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-13T07:35:24,134 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:24,134 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:24,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=172 2024-12-13T07:35:24,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=171 2024-12-13T07:35:24,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 2024-12-13T07:35:24,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:24,136 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2837): Flushing 4c7a924e1bb62722de6c48ffcadf7b19 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-13T07:35:24,136 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 6572c03e43180e068903f7f56608b8da 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-13T07:35:24,156 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/.tmp/cf/f2654422aa6c48b9a0bfe654c590cb71 is 69, key is 0d99479c8278fad4e2bb0b20cc9e4525f/cf:q/1734075323908/Put/seqid=0 2024-12-13T07:35:24,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/.tmp/cf/4079fb9f0d404665b5ddb665365ca7af is 71, key is 2a67b3a00222ae956316036a9b60367b/cf:q/1734075323909/Put/seqid=0 2024-12-13T07:35:24,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742194_1370 (size=5149) 2024-12-13T07:35:24,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742194_1370 (size=5149) 2024-12-13T07:35:24,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742195_1371 (size=8462) 2024-12-13T07:35:24,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34409 is added to blk_1073742195_1371 (size=8462) 2024-12-13T07:35:24,166 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/.tmp/cf/f2654422aa6c48b9a0bfe654c590cb71 2024-12-13T07:35:24,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742194_1370 (size=5149) 2024-12-13T07:35:24,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742195_1371 (size=8462) 2024-12-13T07:35:24,166 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/.tmp/cf/4079fb9f0d404665b5ddb665365ca7af 2024-12-13T07:35:24,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/.tmp/cf/f2654422aa6c48b9a0bfe654c590cb71 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/cf/f2654422aa6c48b9a0bfe654c590cb71 2024-12-13T07:35:24,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/.tmp/cf/4079fb9f0d404665b5ddb665365ca7af as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/cf/4079fb9f0d404665b5ddb665365ca7af 2024-12-13T07:35:24,174 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/cf/4079fb9f0d404665b5ddb665365ca7af, entries=49, sequenceid=6, filesize=8.3 K 2024-12-13T07:35:24,174 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/cf/f2654422aa6c48b9a0bfe654c590cb71, entries=1, sequenceid=6, filesize=5.0 K 2024-12-13T07:35:24,175 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(3040): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 
4c7a924e1bb62722de6c48ffcadf7b19 in 39ms, sequenceid=6, compaction requested=false 2024-12-13T07:35:24,175 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 6572c03e43180e068903f7f56608b8da in 39ms, sequenceid=6, compaction requested=false 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 6572c03e43180e068903f7f56608b8da: 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2538): Flush status journal for 4c7a924e1bb62722de6c48ffcadf7b19: 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/cf/4079fb9f0d404665b5ddb665365ca7af] hfiles 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/cf/4079fb9f0d404665b5ddb665365ca7af for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:24,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:24,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/cf/f2654422aa6c48b9a0bfe654c590cb71] hfiles 2024-12-13T07:35:24,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/cf/f2654422aa6c48b9a0bfe654c590cb71 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:24,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742196_1372 (size=115) 2024-12-13T07:35:24,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742196_1372 (size=115) 2024-12-13T07:35:24,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742196_1372 (size=115) 2024-12-13T07:35:24,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742197_1373 (size=115) 2024-12-13T07:35:24,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 
2024-12-13T07:35:24,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=171 2024-12-13T07:35:24,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742197_1373 (size=115) 2024-12-13T07:35:24,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=171 2024-12-13T07:35:24,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742197_1373 (size=115) 2024-12-13T07:35:24,186 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:24,186 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:24,186 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:24,186 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-13T07:35:24,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-13T07:35:24,186 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:24,187 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:24,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; SnapshotRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19 in 204 msec 2024-12-13T07:35:24,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=170 2024-12-13T07:35:24,188 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:35:24,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; SnapshotRegionProcedure 6572c03e43180e068903f7f56608b8da in 205 msec 2024-12-13T07:35:24,188 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:35:24,189 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:35:24,189 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:24,189 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:24,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742198_1374 (size=645) 2024-12-13T07:35:24,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742198_1374 (size=645) 2024-12-13T07:35:24,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742198_1374 (size=645) 2024-12-13T07:35:24,198 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:35:24,202 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:35:24,202 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:24,203 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:35:24,203 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-13T07:35:24,204 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 237 msec 2024-12-13T07:35:24,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-13T07:35:24,272 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 170 completed 2024-12-13T07:35:24,272 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075324272 2024-12-13T07:35:24,272 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41477, tgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075324272, rawTgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075324272, srcFsUri=hdfs://localhost:41477, srcDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:24,299 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41477, inputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:24,299 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075324272, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075324272/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:24,300 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T07:35:24,303 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075324272/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:24,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742200_1376 (size=185) 2024-12-13T07:35:24,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742199_1375 (size=567) 2024-12-13T07:35:24,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742199_1375 (size=567) 2024-12-13T07:35:24,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742200_1376 (size=185) 2024-12-13T07:35:24,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742200_1376 (size=185) 2024-12-13T07:35:24,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742199_1375 (size=567) 2024-12-13T07:35:24,317 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:24,317 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:24,317 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:24,317 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:25,142 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-529212513320437957.jar 2024-12-13T07:35:25,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:25,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:25,199 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-7564728285287087202.jar 2024-12-13T07:35:25,199 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:25,199 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:25,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:25,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:25,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:25,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:25,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T07:35:25,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T07:35:25,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T07:35:25,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T07:35:25,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T07:35:25,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T07:35:25,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T07:35:25,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T07:35:25,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T07:35:25,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T07:35:25,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 
2024-12-13T07:35:25,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T07:35:25,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:35:25,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:35:25,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:35:25,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:35:25,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:35:25,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:35:25,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:35:25,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742201_1377 (size=127628) 2024-12-13T07:35:25,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742201_1377 (size=127628) 2024-12-13T07:35:25,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742201_1377 (size=127628) 2024-12-13T07:35:25,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742202_1378 (size=2172137) 2024-12-13T07:35:25,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742202_1378 (size=2172137) 2024-12-13T07:35:25,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742202_1378 (size=2172137) 2024-12-13T07:35:25,265 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742203_1379 (size=213228) 2024-12-13T07:35:25,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742203_1379 (size=213228) 2024-12-13T07:35:25,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742203_1379 (size=213228) 2024-12-13T07:35:25,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742204_1380 (size=1877034) 2024-12-13T07:35:25,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742204_1380 (size=1877034) 2024-12-13T07:35:25,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742204_1380 (size=1877034) 2024-12-13T07:35:25,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742205_1381 (size=533455) 2024-12-13T07:35:25,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742205_1381 (size=533455) 2024-12-13T07:35:25,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742205_1381 (size=533455) 2024-12-13T07:35:25,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742206_1382 (size=7280644) 2024-12-13T07:35:25,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742206_1382 (size=7280644) 2024-12-13T07:35:25,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742206_1382 (size=7280644) 2024-12-13T07:35:25,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742207_1383 (size=4188619) 2024-12-13T07:35:25,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742207_1383 (size=4188619) 2024-12-13T07:35:25,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742207_1383 (size=4188619) 2024-12-13T07:35:25,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742208_1384 (size=20406) 2024-12-13T07:35:25,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742208_1384 (size=20406) 2024-12-13T07:35:25,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742208_1384 (size=20406) 2024-12-13T07:35:25,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742209_1385 (size=75495) 2024-12-13T07:35:25,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742209_1385 (size=75495) 2024-12-13T07:35:25,326 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742209_1385 (size=75495) 2024-12-13T07:35:25,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742210_1386 (size=45609) 2024-12-13T07:35:25,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742210_1386 (size=45609) 2024-12-13T07:35:25,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742210_1386 (size=45609) 2024-12-13T07:35:25,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742211_1387 (size=110084) 2024-12-13T07:35:25,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742211_1387 (size=110084) 2024-12-13T07:35:25,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742211_1387 (size=110084) 2024-12-13T07:35:25,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742212_1388 (size=1323991) 2024-12-13T07:35:25,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742212_1388 (size=1323991) 2024-12-13T07:35:25,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742212_1388 (size=1323991) 2024-12-13T07:35:25,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742213_1389 (size=23076) 2024-12-13T07:35:25,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742213_1389 (size=23076) 2024-12-13T07:35:25,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742213_1389 (size=23076) 2024-12-13T07:35:25,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742214_1390 (size=126803) 2024-12-13T07:35:25,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742214_1390 (size=126803) 2024-12-13T07:35:25,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742214_1390 (size=126803) 2024-12-13T07:35:25,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742215_1391 (size=322274) 2024-12-13T07:35:25,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742215_1391 (size=322274) 2024-12-13T07:35:25,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742215_1391 (size=322274) 2024-12-13T07:35:25,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742216_1392 (size=451756) 
2024-12-13T07:35:25,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742216_1392 (size=451756) 2024-12-13T07:35:25,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742216_1392 (size=451756) 2024-12-13T07:35:25,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742217_1393 (size=1832290) 2024-12-13T07:35:25,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742217_1393 (size=1832290) 2024-12-13T07:35:25,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742217_1393 (size=1832290) 2024-12-13T07:35:25,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742218_1394 (size=30081) 2024-12-13T07:35:25,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742218_1394 (size=30081) 2024-12-13T07:35:25,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742218_1394 (size=30081) 2024-12-13T07:35:25,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742219_1395 (size=53616) 2024-12-13T07:35:25,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742219_1395 (size=53616) 2024-12-13T07:35:25,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742219_1395 (size=53616) 2024-12-13T07:35:25,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742220_1396 (size=29229) 2024-12-13T07:35:25,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742220_1396 (size=29229) 2024-12-13T07:35:25,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742220_1396 (size=29229) 2024-12-13T07:35:25,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742221_1397 (size=169089) 2024-12-13T07:35:25,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742221_1397 (size=169089) 2024-12-13T07:35:25,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742221_1397 (size=169089) 2024-12-13T07:35:25,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742222_1398 (size=6350899) 2024-12-13T07:35:25,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742222_1398 (size=6350899) 2024-12-13T07:35:25,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742222_1398 
(size=6350899) 2024-12-13T07:35:25,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742223_1399 (size=5175431) 2024-12-13T07:35:25,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742223_1399 (size=5175431) 2024-12-13T07:35:25,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742223_1399 (size=5175431) 2024-12-13T07:35:25,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742224_1400 (size=136454) 2024-12-13T07:35:25,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742224_1400 (size=136454) 2024-12-13T07:35:25,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742224_1400 (size=136454) 2024-12-13T07:35:25,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742225_1401 (size=907854) 2024-12-13T07:35:25,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742225_1401 (size=907854) 2024-12-13T07:35:25,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742225_1401 (size=907854) 2024-12-13T07:35:25,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742226_1402 (size=3317408) 2024-12-13T07:35:25,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742226_1402 (size=3317408) 2024-12-13T07:35:25,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742226_1402 (size=3317408) 2024-12-13T07:35:25,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742227_1403 (size=503880) 2024-12-13T07:35:25,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742227_1403 (size=503880) 2024-12-13T07:35:25,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742227_1403 (size=503880) 2024-12-13T07:35:25,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742228_1404 (size=4695811) 2024-12-13T07:35:25,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742228_1404 (size=4695811) 2024-12-13T07:35:25,959 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-13T07:35:25,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742228_1404 (size=4695811) 2024-12-13T07:35:25,962 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-13T07:35:25,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742229_1405 (size=7) 2024-12-13T07:35:25,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742229_1405 (size=7) 2024-12-13T07:35:25,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742229_1405 (size=7) 2024-12-13T07:35:25,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742230_1406 (size=10) 2024-12-13T07:35:25,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742230_1406 (size=10) 2024-12-13T07:35:25,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742230_1406 (size=10) 2024-12-13T07:35:25,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742231_1407 (size=304784) 2024-12-13T07:35:25,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742231_1407 (size=304784) 2024-12-13T07:35:25,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742231_1407 (size=304784) 2024-12-13T07:35:26,015 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:35:26,015 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:35:26,341 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0007_000001 (auth:SIMPLE) from 127.0.0.1:57012 2024-12-13T07:35:27,755 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:35:30,631 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
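Just above, ExportSnapshot has loaded the hfile list for 'emptySnaptb0-testEmptyExportFileSystemState' and is handing the copy work to a MapReduce job on the mini YARN cluster (hence the maximum-am-resource-percent warnings from the tiny test queue). The same export can be driven programmatically through ToolRunner; a rough sketch, with a hypothetical destination URI and only the common -snapshot/-copy-to options:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Destination is an assumption; the test exports into an export-test directory on the same mini DFS.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://localhost:41477/user/jenkins/export-dest"
    });
    System.exit(rc);
  }
}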
2024-12-13T07:35:31,541 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0007_000001 (auth:SIMPLE) from 127.0.0.1:53186 2024-12-13T07:35:31,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742232_1408 (size=350434) 2024-12-13T07:35:31,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742232_1408 (size=350434) 2024-12-13T07:35:31,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742232_1408 (size=350434) 2024-12-13T07:35:32,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-13T07:35:32,021 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-13T07:35:32,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-13T07:35:32,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742233_1409 (size=8568) 2024-12-13T07:35:32,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742233_1409 (size=8568) 2024-12-13T07:35:32,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742233_1409 (size=8568) 2024-12-13T07:35:32,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742234_1410 (size=460) 2024-12-13T07:35:32,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742234_1410 (size=460) 2024-12-13T07:35:32,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742234_1410 (size=460) 2024-12-13T07:35:32,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742235_1411 (size=8568) 2024-12-13T07:35:32,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742235_1411 (size=8568) 2024-12-13T07:35:32,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742235_1411 (size=8568) 2024-12-13T07:35:32,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742236_1412 (size=350434) 2024-12-13T07:35:32,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742236_1412 (size=350434) 2024-12-13T07:35:32,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742236_1412 (size=350434) 
2024-12-13T07:35:34,125 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T07:35:34,125 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-13T07:35:34,131 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:34,131 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T07:35:34,132 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T07:35:34,132 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:34,132 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-13T07:35:34,132 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-13T07:35:34,132 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075324272/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075324272/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:34,133 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075324272/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-13T07:35:34,133 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075324272/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-13T07:35:34,138 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,138 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T07:35:34,140 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075334140"}]},"ts":"1734075334140"} 2024-12-13T07:35:34,141 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-13T07:35:34,222 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-13T07:35:34,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-13T07:35:34,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4c7a924e1bb62722de6c48ffcadf7b19, UNASSIGN}, {pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6572c03e43180e068903f7f56608b8da, UNASSIGN}] 2024-12-13T07:35:34,224 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4c7a924e1bb62722de6c48ffcadf7b19, UNASSIGN 2024-12-13T07:35:34,225 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6572c03e43180e068903f7f56608b8da, UNASSIGN 2024-12-13T07:35:34,225 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=6572c03e43180e068903f7f56608b8da, regionState=CLOSING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:34,225 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=4c7a924e1bb62722de6c48ffcadf7b19, regionState=CLOSING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:34,226 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:35:34,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=175, state=RUNNABLE; CloseRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:35:34,226 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:35:34,226 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=176, state=RUNNABLE; CloseRegionProcedure 6572c03e43180e068903f7f56608b8da, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:35:34,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T07:35:34,377 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:35:34,377 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:34,379 INFO 
[RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:34,379 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(124): Close 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:34,379 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:35:34,379 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:35:34,379 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing 6572c03e43180e068903f7f56608b8da, disabling compactions & flushes 2024-12-13T07:35:34,379 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1681): Closing 4c7a924e1bb62722de6c48ffcadf7b19, disabling compactions & flushes 2024-12-13T07:35:34,379 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 2024-12-13T07:35:34,379 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:34,380 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 2024-12-13T07:35:34,380 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:34,380 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. after waiting 0 ms 2024-12-13T07:35:34,380 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. after waiting 0 ms 2024-12-13T07:35:34,380 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:34,380 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 
2024-12-13T07:35:34,387 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:35:34,387 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:35:34,388 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:35:34,388 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da. 2024-12-13T07:35:34,388 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for 6572c03e43180e068903f7f56608b8da: 2024-12-13T07:35:34,388 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:35:34,388 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19. 
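These RS_CLOSE_REGION handlers are the region-server half of the DisableTableProcedure stored as pid=173: each region blocks further updates, writes its recovered.edits seqid marker, stops the AccessController coprocessor, and reports back so the master can mark it CLOSED. From a client, the whole sequence is started by a single Admin call; a minimal sketch (connection setup assumed):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (!admin.isTableDisabled(table)) {
        // Blocks until the master-side DisableTableProcedure (pid=173 here) finishes.
        admin.disableTable(table);
      }
    }
  }
}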
2024-12-13T07:35:34,389 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1635): Region close journal for 4c7a924e1bb62722de6c48ffcadf7b19: 2024-12-13T07:35:34,390 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed 6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:34,390 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=6572c03e43180e068903f7f56608b8da, regionState=CLOSED 2024-12-13T07:35:34,390 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(170): Closed 4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:34,391 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=4c7a924e1bb62722de6c48ffcadf7b19, regionState=CLOSED 2024-12-13T07:35:34,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=176 2024-12-13T07:35:34,394 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=175 2024-12-13T07:35:34,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=176, state=SUCCESS; CloseRegionProcedure 6572c03e43180e068903f7f56608b8da, server=9eb75de94ef3,41895,1734075182430 in 166 msec 2024-12-13T07:35:34,394 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=175, state=SUCCESS; CloseRegionProcedure 4c7a924e1bb62722de6c48ffcadf7b19, server=9eb75de94ef3,36021,1734075182342 in 166 msec 2024-12-13T07:35:34,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6572c03e43180e068903f7f56608b8da, UNASSIGN in 169 msec 2024-12-13T07:35:34,395 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=175, resume processing ppid=174 2024-12-13T07:35:34,395 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4c7a924e1bb62722de6c48ffcadf7b19, UNASSIGN in 170 msec 2024-12-13T07:35:34,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-13T07:35:34,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 172 msec 2024-12-13T07:35:34,397 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075334397"}]},"ts":"1734075334397"} 2024-12-13T07:35:34,398 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-13T07:35:34,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T07:35:34,541 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-13T07:35:34,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DisableTableProcedure 
table=testtb-testEmptyExportFileSystemState in 404 msec 2024-12-13T07:35:34,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T07:35:34,744 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 173 completed 2024-12-13T07:35:34,744 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,745 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,746 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,747 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,749 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:34,749 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:34,750 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/recovered.edits] 2024-12-13T07:35:34,750 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/recovered.edits] 2024-12-13T07:35:34,754 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/cf/4079fb9f0d404665b5ddb665365ca7af to 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/cf/4079fb9f0d404665b5ddb665365ca7af 2024-12-13T07:35:34,754 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/cf/f2654422aa6c48b9a0bfe654c590cb71 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/cf/f2654422aa6c48b9a0bfe654c590cb71 2024-12-13T07:35:34,756 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da/recovered.edits/9.seqid 2024-12-13T07:35:34,756 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19/recovered.edits/9.seqid 2024-12-13T07:35:34,757 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/6572c03e43180e068903f7f56608b8da 2024-12-13T07:35:34,757 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testEmptyExportFileSystemState/4c7a924e1bb62722de6c48ffcadf7b19 2024-12-13T07:35:34,757 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-13T07:35:34,758 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,761 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-13T07:35:34,763 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-13T07:35:34,764 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,764 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testEmptyExportFileSystemState' from region states. 
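With the table disabled, DeleteTableProcedure first archives every region's cf and recovered.edits files under the archive directory, then removes the region directories, the hbase:meta rows, and the table descriptor. On the client side this flow corresponds to a single deleteTable call, and the snapshot deletions that follow a few entries later correspond to deleteSnapshot; a brief sketch of both:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshots {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // DeleteTableProcedure archives the region files, then cleans hbase:meta and the ACL znode.
      admin.deleteTable(table);
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}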
2024-12-13T07:35:34,765 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075334764"}]},"ts":"9223372036854775807"} 2024-12-13T07:35:34,765 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075334764"}]},"ts":"9223372036854775807"} 2024-12-13T07:35:34,767 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T07:35:34,767 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4c7a924e1bb62722de6c48ffcadf7b19, NAME => 'testtb-testEmptyExportFileSystemState,,1734075322434.4c7a924e1bb62722de6c48ffcadf7b19.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6572c03e43180e068903f7f56608b8da, NAME => 'testtb-testEmptyExportFileSystemState,1,1734075322434.6572c03e43180e068903f7f56608b8da.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T07:35:34,767 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-12-13T07:35:34,767 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734075334767"}]},"ts":"9223372036854775807"} 2024-12-13T07:35:34,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,768 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-13T07:35:34,769 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-13T07:35:34,769 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-13T07:35:34,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:34,776 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:34,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:34,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:34,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-13T07:35:34,784 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-12-13T07:35:34,784 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-12-13T07:35:34,784 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-13T07:35:34,784 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-13T07:35:34,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:34,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:34,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:34,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:34,785 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T07:35:34,786 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 41 msec 2024-12-13T07:35:34,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-13T07:35:34,878 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 179 completed 2024-12-13T07:35:34,883 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" 2024-12-13T07:35:34,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:34,886 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" 2024-12-13T07:35:34,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-13T07:35:34,905 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=812 (was 795) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44663 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1375975952_1 at /127.0.0.1:45298 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1375975952_1 at /127.0.0.1:52402 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:45320 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-39 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1658146201) connection to localhost/127.0.0.1:44663 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging 
thread: hconnection-0x6218feab-shared-pool-40 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-41 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5681 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:57900 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1658146201) connection to localhost/127.0.0.1:46609 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x6218feab-shared-pool-42 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46609 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:52436 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1658146201) connection to localhost/127.0.0.1:46277 from appattempt_1734075189398_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 70424) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=823 (was 799) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=440 (was 417) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=6316 (was 7188) 2024-12-13T07:35:34,905 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-12-13T07:35:34,919 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=812, OpenFileDescriptor=823, MaxFileDescriptor=1048576, SystemLoadAverage=440, ProcessCount=17, AvailableMemoryMB=6315 2024-12-13T07:35:34,919 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-12-13T07:35:34,921 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:35:34,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-13T07:35:34,922 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:35:34,922 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:34,922 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 180 2024-12-13T07:35:34,923 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:35:34,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-13T07:35:34,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742237_1413 (size=404) 2024-12-13T07:35:34,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742237_1413 (size=404) 2024-12-13T07:35:34,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742237_1413 (size=404) 2024-12-13T07:35:34,930 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5025487cefbea8591ac9d65826ff59af, NAME => 'testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', 
TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:34,930 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => dbfbadb834b88850238be50b11c2116b, NAME => 'testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:34,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742239_1415 (size=65) 2024-12-13T07:35:34,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742239_1415 (size=65) 2024-12-13T07:35:34,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742238_1414 (size=65) 2024-12-13T07:35:34,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742238_1414 (size=65) 2024-12-13T07:35:34,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742239_1415 (size=65) 2024-12-13T07:35:34,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742238_1414 (size=65) 2024-12-13T07:35:34,936 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:34,936 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1681): Closing 5025487cefbea8591ac9d65826ff59af, disabling compactions & flushes 2024-12-13T07:35:34,936 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:35:34,936 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:35:34,937 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 
after waiting 0 ms 2024-12-13T07:35:34,937 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:35:34,937 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:35:34,937 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5025487cefbea8591ac9d65826ff59af: 2024-12-13T07:35:34,937 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:34,937 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1681): Closing dbfbadb834b88850238be50b11c2116b, disabling compactions & flushes 2024-12-13T07:35:34,937 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:35:34,937 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:35:34,937 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. after waiting 0 ms 2024-12-13T07:35:34,937 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:35:34,937 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:35:34,937 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1635): Region close journal for dbfbadb834b88850238be50b11c2116b: 2024-12-13T07:35:34,938 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:35:34,938 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734075334938"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075334938"}]},"ts":"1734075334938"} 2024-12-13T07:35:34,938 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734075334938"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075334938"}]},"ts":"1734075334938"} 2024-12-13T07:35:34,940 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
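The CreateTableProcedure entries above (pid=180) correspond to an Admin-side createTable request for a table pre-split at row key '1' with a single 'cf' family kept at one version, exactly as the descriptor printed by HMaster shows. A minimal client-side sketch of such a request, assuming a plain HBase 2.x client connection; the test harness itself goes through HBaseTestingUtility, whose exact call is not visible in this log.

// Hedged sketch: recreates the table layout reported in the log
// (family 'cf', VERSIONS => '1', two regions split at '1').
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithChecksum");
      // One column family 'cf', single version, matching the descriptor in the log.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // Pre-split at '1' so two regions are created: ['', '1') and ['1', '').
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splits);
    }
  }
}

The master then stores the CreateTableProcedure and walks it through the CREATE_TABLE_* states logged above while the client polls "Checking to see if procedure is done".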
2024-12-13T07:35:34,941 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:35:34,941 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075334941"}]},"ts":"1734075334941"} 2024-12-13T07:35:34,942 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-13T07:35:34,959 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:35:34,960 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:35:34,960 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:35:34,960 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:35:34,960 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:35:34,960 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:35:34,960 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:35:34,960 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:35:34,961 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5025487cefbea8591ac9d65826ff59af, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dbfbadb834b88850238be50b11c2116b, ASSIGN}] 2024-12-13T07:35:34,962 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dbfbadb834b88850238be50b11c2116b, ASSIGN 2024-12-13T07:35:34,962 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5025487cefbea8591ac9d65826ff59af, ASSIGN 2024-12-13T07:35:34,962 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dbfbadb834b88850238be50b11c2116b, ASSIGN; state=OFFLINE, location=9eb75de94ef3,42189,1734075182526; forceNewPlan=false, retain=false 2024-12-13T07:35:34,962 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5025487cefbea8591ac9d65826ff59af, ASSIGN; state=OFFLINE, location=9eb75de94ef3,36021,1734075182342; forceNewPlan=false, retain=false 2024-12-13T07:35:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=180 2024-12-13T07:35:35,113 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:35:35,113 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=dbfbadb834b88850238be50b11c2116b, regionState=OPENING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:35,113 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=5025487cefbea8591ac9d65826ff59af, regionState=OPENING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:35,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE; OpenRegionProcedure dbfbadb834b88850238be50b11c2116b, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:35:35,118 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE; OpenRegionProcedure 5025487cefbea8591ac9d65826ff59af, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:35:35,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-13T07:35:35,269 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:35,271 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:35,272 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:35:35,272 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7285): Opening region: {ENCODED => dbfbadb834b88850238be50b11c2116b, NAME => 'testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T07:35:35,272 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. service=AccessControlService 2024-12-13T07:35:35,273 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T07:35:35,273 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:35,273 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:35,273 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7327): checking encryption for dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:35,273 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7330): checking classloading for dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:35,273 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:35:35,273 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7285): Opening region: {ENCODED => 5025487cefbea8591ac9d65826ff59af, NAME => 'testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T07:35:35,274 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. service=AccessControlService 2024-12-13T07:35:35,274 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T07:35:35,274 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:35,274 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:35:35,274 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7327): checking encryption for 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:35,274 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7330): checking classloading for 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:35,275 INFO [StoreOpener-dbfbadb834b88850238be50b11c2116b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:35,275 INFO [StoreOpener-5025487cefbea8591ac9d65826ff59af-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:35,276 INFO [StoreOpener-dbfbadb834b88850238be50b11c2116b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dbfbadb834b88850238be50b11c2116b columnFamilyName cf 2024-12-13T07:35:35,276 INFO [StoreOpener-5025487cefbea8591ac9d65826ff59af-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5025487cefbea8591ac9d65826ff59af columnFamilyName cf 2024-12-13T07:35:35,276 DEBUG [StoreOpener-dbfbadb834b88850238be50b11c2116b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:35,276 DEBUG [StoreOpener-5025487cefbea8591ac9d65826ff59af-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:35:35,277 INFO [StoreOpener-5025487cefbea8591ac9d65826ff59af-1 {}] regionserver.HStore(327): Store=5025487cefbea8591ac9d65826ff59af/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:35:35,277 INFO [StoreOpener-dbfbadb834b88850238be50b11c2116b-1 {}] regionserver.HStore(327): Store=dbfbadb834b88850238be50b11c2116b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:35:35,277 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:35,278 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:35,278 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:35,278 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:35,279 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1085): writing seq id for 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:35,280 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1085): writing seq id for dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:35,281 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:35:35,281 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:35:35,281 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1102): Opened 
5025487cefbea8591ac9d65826ff59af; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73447365, jitterRate=0.09445102512836456}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:35:35,281 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1102): Opened dbfbadb834b88850238be50b11c2116b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71866575, jitterRate=0.07089541852474213}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:35:35,282 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1001): Region open journal for 5025487cefbea8591ac9d65826ff59af: 2024-12-13T07:35:35,282 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1001): Region open journal for dbfbadb834b88850238be50b11c2116b: 2024-12-13T07:35:35,282 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b., pid=183, masterSystemTime=1734075335269 2024-12-13T07:35:35,282 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af., pid=184, masterSystemTime=1734075335271 2024-12-13T07:35:35,283 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:35:35,283 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:35:35,284 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=dbfbadb834b88850238be50b11c2116b, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:35,284 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:35:35,284 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 
2024-12-13T07:35:35,284 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=5025487cefbea8591ac9d65826ff59af, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:35,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182 2024-12-13T07:35:35,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; OpenRegionProcedure dbfbadb834b88850238be50b11c2116b, server=9eb75de94ef3,42189,1734075182526 in 169 msec 2024-12-13T07:35:35,286 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=181 2024-12-13T07:35:35,286 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=181, state=SUCCESS; OpenRegionProcedure 5025487cefbea8591ac9d65826ff59af, server=9eb75de94ef3,36021,1734075182342 in 168 msec 2024-12-13T07:35:35,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dbfbadb834b88850238be50b11c2116b, ASSIGN in 325 msec 2024-12-13T07:35:35,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-12-13T07:35:35,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5025487cefbea8591ac9d65826ff59af, ASSIGN in 325 msec 2024-12-13T07:35:35,288 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:35:35,288 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075335288"}]},"ts":"1734075335288"} 2024-12-13T07:35:35,289 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-13T07:35:35,332 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:35:35,332 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-13T07:35:35,333 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-13T07:35:35,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:35,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:35,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:35,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:35:35,352 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:35,352 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:35,352 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:35,352 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:35,352 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:35,352 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:35,352 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:35,352 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-13T07:35:35,353 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithChecksum in 430 msec 2024-12-13T07:35:35,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-13T07:35:35,529 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum, procId: 180 completed 2024-12-13T07:35:35,529 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithChecksum get assigned. 
Timeout = 60000ms 2024-12-13T07:35:35,529 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:35,535 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-13T07:35:35,536 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:35,536 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithChecksum assigned. 2024-12-13T07:35:35,539 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-13T07:35:35,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075335539 (current time:1734075335539). 2024-12-13T07:35:35,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:35:35,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-13T07:35:35,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:35:35,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x164ff745 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a1cce4f 2024-12-13T07:35:35,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5610a748, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:35,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:35,555 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x164ff745 to 127.0.0.1:59810 2024-12-13T07:35:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:35,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x48fa01cd to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@24f8d02b 2024-12-13T07:35:35,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7781191b, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:35,600 DEBUG [hconnection-0x678cfd67-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:35,601 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48962, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:35,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:35,603 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50900, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:35,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x48fa01cd to 127.0.0.1:59810 2024-12-13T07:35:35,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:35,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-13T07:35:35,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:35:35,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-13T07:35:35,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-13T07:35:35,606 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:35:35,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-13T07:35:35,607 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:35:35,609 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:35:35,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34409 is added to blk_1073742240_1416 (size=161) 2024-12-13T07:35:35,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742240_1416 (size=161) 2024-12-13T07:35:35,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742240_1416 (size=161) 2024-12-13T07:35:35,615 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:35:35,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 5025487cefbea8591ac9d65826ff59af}, {pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure dbfbadb834b88850238be50b11c2116b}] 2024-12-13T07:35:35,615 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:35,615 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:35,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-13T07:35:35,766 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:35,766 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:35,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-13T07:35:35,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42189 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 
2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2538): Flush status journal for dbfbadb834b88850238be50b11c2116b: 2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for 5025487cefbea8591ac9d65826ff59af: 2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. for emptySnaptb0-testExportWithChecksum completed. 2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. for emptySnaptb0-testExportWithChecksum completed. 2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:35:35,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:35:35,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742242_1418 (size=68) 2024-12-13T07:35:35,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742242_1418 (size=68) 2024-12-13T07:35:35,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742241_1417 (size=68) 2024-12-13T07:35:35,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742242_1418 (size=68) 2024-12-13T07:35:35,775 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 
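The emptySnaptb0-testExportWithChecksum procedure (pid=185 and its SnapshotRegionProcedure children 186/187) is driven by a client snapshot request; FLUSH is the default snapshot type for an enabled table, so on the client it reduces to Admin.snapshot with the names taken from the log. A hedged sketch, not the test's literal code:

// Minimal sketch of the client call behind the
// "{ ss=emptySnaptb0-testExportWithChecksum ... type=FLUSH ttl=0 }" request.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure (pid=185 above) completes.
      admin.snapshot("emptySnaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"));
    }
  }
}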
2024-12-13T07:35:35,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742241_1417 (size=68) 2024-12-13T07:35:35,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742241_1417 (size=68) 2024-12-13T07:35:35,775 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-13T07:35:35,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:35:35,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-13T07:35:35,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-13T07:35:35,776 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:35,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=187 2024-12-13T07:35:35,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:35,776 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:35,777 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:35,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; SnapshotRegionProcedure 5025487cefbea8591ac9d65826ff59af in 162 msec 2024-12-13T07:35:35,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=187, resume processing ppid=185 2024-12-13T07:35:35,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, ppid=185, state=SUCCESS; SnapshotRegionProcedure dbfbadb834b88850238be50b11c2116b in 162 msec 2024-12-13T07:35:35,779 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:35:35,780 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:35:35,780 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, 
state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:35:35,780 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-13T07:35:35,781 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-13T07:35:35,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742243_1419 (size=543) 2024-12-13T07:35:35,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742243_1419 (size=543) 2024-12-13T07:35:35,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742243_1419 (size=543) 2024-12-13T07:35:35,790 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:35:35,793 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:35:35,793 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-13T07:35:35,794 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:35:35,794 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-13T07:35:35,795 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 189 msec 2024-12-13T07:35:35,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-13T07:35:35,910 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 185 
completed 2024-12-13T07:35:35,923 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:35:35,923 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42189 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:35:35,926 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithChecksum 2024-12-13T07:35:35,926 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:35:35,926 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:35:35,937 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-13T07:35:35,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075335937 (current time:1734075335937). 2024-12-13T07:35:35,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:35:35,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-13T07:35:35,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:35:35,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d483c26 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@57f37fd2 2024-12-13T07:35:35,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6780b489, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:35,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:35,984 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48970, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:35,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d483c26 to 127.0.0.1:59810 2024-12-13T07:35:35,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:35,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x7eda4404 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@329bb9e2 2024-12-13T07:35:36,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@505d77c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:35:36,003 DEBUG [hconnection-0x3d34f2b9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:36,004 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48976, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:36,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:35:36,006 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50904, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:35:36,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7eda4404 to 127.0.0.1:59810 2024-12-13T07:35:36,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:35:36,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-13T07:35:36,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
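The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings a few entries back are what HRegion emits when the client loads rows with SKIP_WAL durability. A minimal, assumed example of such a write; the row key and value are placeholders, while the table name, family 'cf', and qualifier 'q' are taken from the log.

// Hedged sketch of a WAL-skipping load that would trigger the HRegion(8254) warning.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
      Put put = new Put(Bytes.toBytes("row-0"))                      // placeholder row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),
              Bytes.toBytes("value"))                                // placeholder value
          .setDurability(Durability.SKIP_WAL);                       // fast load, no WAL protection
      table.put(put);
    }
  }
}

The trade-off is exactly what the warning states: edits skipped from the WAL cannot be replayed after a region server crash, which is acceptable for throwaway test data but not for production writes.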
2024-12-13T07:35:36,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-13T07:35:36,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-13T07:35:36,009 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:35:36,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-13T07:35:36,010 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:35:36,011 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:35:36,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742244_1420 (size=156) 2024-12-13T07:35:36,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742244_1420 (size=156) 2024-12-13T07:35:36,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742244_1420 (size=156) 2024-12-13T07:35:36,017 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:35:36,017 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 5025487cefbea8591ac9d65826ff59af}, {pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure dbfbadb834b88850238be50b11c2116b}] 2024-12-13T07:35:36,018 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:36,018 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:36,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 
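The test named in the ResourceChecker banner, TestSecureExportSnapshot#testExportWithChecksum, ultimately drives the ExportSnapshot tool against the snaptb0-testExportWithChecksum snapshot being built in pid=188. A hedged sketch of a typical driver invocation; the destination URI is a placeholder and the exact flags the test passes are not visible in this log (file checksum verification is the tool's default behaviour, so no -no-checksum-verify flag is given):

// Assumed example of running ExportSnapshot programmatically via ToolRunner.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithChecksum {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        // Placeholder export target; the test writes under its own temp HDFS/local dirs.
        "-copy-to", "hdfs://namenode:8020/export/snaptb0-testExportWithChecksum"
    });
    System.exit(rc);
  }
}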
2024-12-13T07:35:36,169 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:35:36,169 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:35:36,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36021 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189 2024-12-13T07:35:36,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42189 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190 2024-12-13T07:35:36,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:35:36,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:35:36,170 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2837): Flushing 5025487cefbea8591ac9d65826ff59af 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-13T07:35:36,170 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing dbfbadb834b88850238be50b11c2116b 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-13T07:35:36,187 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/.tmp/cf/b0062945bfe44cc8b6890c633a4a1cce is 71, key is 13723318c68102d35d17bcc6d0d49f63/cf:q/1734075335923/Put/seqid=0 2024-12-13T07:35:36,187 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/.tmp/cf/ae5ef50022b44882800fc62a2c81e4f0 is 71, key is 091a25493a4abf23445978932c627059/cf:q/1734075335923/Put/seqid=0 2024-12-13T07:35:36,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742245_1421 (size=5422) 2024-12-13T07:35:36,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742245_1421 (size=5422) 2024-12-13T07:35:36,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742246_1422 (size=8188) 2024-12-13T07:35:36,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742245_1421 (size=5422) 2024-12-13T07:35:36,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33299 is added to blk_1073742246_1422 (size=8188) 2024-12-13T07:35:36,193 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/.tmp/cf/ae5ef50022b44882800fc62a2c81e4f0 2024-12-13T07:35:36,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742246_1422 (size=8188) 2024-12-13T07:35:36,193 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/.tmp/cf/b0062945bfe44cc8b6890c633a4a1cce 2024-12-13T07:35:36,197 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/.tmp/cf/b0062945bfe44cc8b6890c633a4a1cce as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce 2024-12-13T07:35:36,197 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/.tmp/cf/ae5ef50022b44882800fc62a2c81e4f0 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/cf/ae5ef50022b44882800fc62a2c81e4f0 2024-12-13T07:35:36,201 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce, entries=45, sequenceid=6, filesize=8.0 K 2024-12-13T07:35:36,201 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/cf/ae5ef50022b44882800fc62a2c81e4f0, entries=5, sequenceid=6, filesize=5.3 K 2024-12-13T07:35:36,201 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for dbfbadb834b88850238be50b11c2116b in 32ms, sequenceid=6, compaction requested=false 2024-12-13T07:35:36,201 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] 
regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 5025487cefbea8591ac9d65826ff59af in 32ms, sequenceid=6, compaction requested=false 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for dbfbadb834b88850238be50b11c2116b: 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2538): Flush status journal for 5025487cefbea8591ac9d65826ff59af: 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. for snaptb0-testExportWithChecksum completed. 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. for snaptb0-testExportWithChecksum completed. 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce] hfiles 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce for snapshot=snaptb0-testExportWithChecksum 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/cf/ae5ef50022b44882800fc62a2c81e4f0] hfiles 2024-12-13T07:35:36,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/cf/ae5ef50022b44882800fc62a2c81e4f0 for snapshot=snaptb0-testExportWithChecksum 2024-12-13T07:35:36,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742247_1423 (size=107) 2024-12-13T07:35:36,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742247_1423 (size=107) 2024-12-13T07:35:36,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742248_1424 (size=107) 2024-12-13T07:35:36,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742247_1423 (size=107) 2024-12-13T07:35:36,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742248_1424 (size=107) 2024-12-13T07:35:36,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742248_1424 (size=107) 2024-12-13T07:35:36,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 
2024-12-13T07:35:36,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189 2024-12-13T07:35:36,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:35:36,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-13T07:35:36,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=189 2024-12-13T07:35:36,209 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:36,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-12-13T07:35:36,209 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:35:36,209 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:36,209 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure dbfbadb834b88850238be50b11c2116b 2024-12-13T07:35:36,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, ppid=188, state=SUCCESS; SnapshotRegionProcedure 5025487cefbea8591ac9d65826ff59af in 193 msec 2024-12-13T07:35:36,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=188 2024-12-13T07:35:36,211 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:35:36,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=188, state=SUCCESS; SnapshotRegionProcedure dbfbadb834b88850238be50b11c2116b in 193 msec 2024-12-13T07:35:36,212 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:35:36,212 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:35:36,212 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for 
snaptb0-testExportWithChecksum 2024-12-13T07:35:36,213 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-13T07:35:36,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742249_1425 (size=621) 2024-12-13T07:35:36,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742249_1425 (size=621) 2024-12-13T07:35:36,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742249_1425 (size=621) 2024-12-13T07:35:36,222 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:35:36,226 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:35:36,227 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-13T07:35:36,228 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:35:36,228 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-13T07:35:36,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 220 msec 2024-12-13T07:35:36,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-13T07:35:36,311 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 188 completed 2024-12-13T07:35:36,312 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075336311 2024-12-13T07:35:36,312 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, 
tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075336311, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075336311, srcFsUri=hdfs://localhost:41477, srcDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:36,343 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41477, inputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:35:36,343 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@576f0795, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075336311, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075336311/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-13T07:35:36,344 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T07:35:36,347 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075336311/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-13T07:35:36,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:36,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:36,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:36,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:37,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-8162701000054436564.jar 2024-12-13T07:35:37,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:37,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:37,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-5394554451025928065.jar 2024-12-13T07:35:37,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:37,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:37,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:37,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:37,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:37,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T07:35:37,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T07:35:37,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T07:35:37,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T07:35:37,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T07:35:37,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T07:35:37,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T07:35:37,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T07:35:37,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T07:35:37,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T07:35:37,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T07:35:37,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T07:35:37,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T07:35:37,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:35:37,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:35:37,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:35:37,271 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:35:37,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:35:37,272 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:35:37,272 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:35:37,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742250_1426 (size=127628) 2024-12-13T07:35:37,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742250_1426 (size=127628) 2024-12-13T07:35:37,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742250_1426 (size=127628) 2024-12-13T07:35:37,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742251_1427 (size=2172137) 2024-12-13T07:35:37,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742251_1427 (size=2172137) 2024-12-13T07:35:37,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742251_1427 (size=2172137) 2024-12-13T07:35:37,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742252_1428 (size=213228) 2024-12-13T07:35:37,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742252_1428 (size=213228) 2024-12-13T07:35:37,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742252_1428 (size=213228) 2024-12-13T07:35:37,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742253_1429 (size=1877034) 2024-12-13T07:35:37,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742253_1429 (size=1877034) 2024-12-13T07:35:37,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742253_1429 (size=1877034) 2024-12-13T07:35:37,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742254_1430 (size=451756) 
2024-12-13T07:35:37,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742254_1430 (size=451756) 2024-12-13T07:35:37,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742254_1430 (size=451756) 2024-12-13T07:35:37,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742255_1431 (size=533455) 2024-12-13T07:35:37,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742255_1431 (size=533455) 2024-12-13T07:35:37,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742255_1431 (size=533455) 2024-12-13T07:35:37,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742256_1432 (size=7280644) 2024-12-13T07:35:37,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742256_1432 (size=7280644) 2024-12-13T07:35:37,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742256_1432 (size=7280644) 2024-12-13T07:35:37,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742257_1433 (size=4188619) 2024-12-13T07:35:37,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742257_1433 (size=4188619) 2024-12-13T07:35:37,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742257_1433 (size=4188619) 2024-12-13T07:35:37,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742258_1434 (size=20406) 2024-12-13T07:35:37,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742258_1434 (size=20406) 2024-12-13T07:35:37,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742258_1434 (size=20406) 2024-12-13T07:35:37,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742259_1435 (size=75495) 2024-12-13T07:35:37,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742259_1435 (size=75495) 2024-12-13T07:35:37,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742259_1435 (size=75495) 2024-12-13T07:35:37,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742260_1436 (size=45609) 2024-12-13T07:35:37,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742260_1436 (size=45609) 2024-12-13T07:35:37,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742260_1436 
(size=45609) 2024-12-13T07:35:37,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742261_1437 (size=110084) 2024-12-13T07:35:37,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742261_1437 (size=110084) 2024-12-13T07:35:37,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742261_1437 (size=110084) 2024-12-13T07:35:37,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742262_1438 (size=1323991) 2024-12-13T07:35:37,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742262_1438 (size=1323991) 2024-12-13T07:35:37,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742262_1438 (size=1323991) 2024-12-13T07:35:37,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742263_1439 (size=23076) 2024-12-13T07:35:37,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742263_1439 (size=23076) 2024-12-13T07:35:37,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742263_1439 (size=23076) 2024-12-13T07:35:37,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742264_1440 (size=126803) 2024-12-13T07:35:37,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742264_1440 (size=126803) 2024-12-13T07:35:37,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742264_1440 (size=126803) 2024-12-13T07:35:37,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742265_1441 (size=322274) 2024-12-13T07:35:37,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742265_1441 (size=322274) 2024-12-13T07:35:37,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742265_1441 (size=322274) 2024-12-13T07:35:37,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742266_1442 (size=1832290) 2024-12-13T07:35:37,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742266_1442 (size=1832290) 2024-12-13T07:35:37,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742266_1442 (size=1832290) 2024-12-13T07:35:37,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742267_1443 (size=30081) 2024-12-13T07:35:37,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to 
blk_1073742267_1443 (size=30081) 2024-12-13T07:35:37,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742267_1443 (size=30081) 2024-12-13T07:35:37,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742268_1444 (size=53616) 2024-12-13T07:35:37,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742268_1444 (size=53616) 2024-12-13T07:35:37,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742268_1444 (size=53616) 2024-12-13T07:35:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742269_1445 (size=29229) 2024-12-13T07:35:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742269_1445 (size=29229) 2024-12-13T07:35:37,524 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:35:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742269_1445 (size=29229) 2024-12-13T07:35:37,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742270_1446 (size=169089) 2024-12-13T07:35:37,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742270_1446 (size=169089) 2024-12-13T07:35:37,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742270_1446 (size=169089) 2024-12-13T07:35:37,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742271_1447 (size=6350899) 2024-12-13T07:35:37,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742271_1447 (size=6350899) 2024-12-13T07:35:37,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742271_1447 (size=6350899) 2024-12-13T07:35:37,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742272_1448 (size=5175431) 2024-12-13T07:35:37,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742272_1448 (size=5175431) 2024-12-13T07:35:37,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742272_1448 (size=5175431) 2024-12-13T07:35:37,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742273_1449 (size=136454) 2024-12-13T07:35:37,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742273_1449 (size=136454) 2024-12-13T07:35:37,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is 
added to blk_1073742273_1449 (size=136454) 2024-12-13T07:35:37,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742274_1450 (size=907854) 2024-12-13T07:35:37,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742274_1450 (size=907854) 2024-12-13T07:35:37,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742274_1450 (size=907854) 2024-12-13T07:35:37,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742275_1451 (size=3317408) 2024-12-13T07:35:37,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742275_1451 (size=3317408) 2024-12-13T07:35:37,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742275_1451 (size=3317408) 2024-12-13T07:35:37,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742276_1452 (size=503880) 2024-12-13T07:35:37,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742276_1452 (size=503880) 2024-12-13T07:35:37,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742276_1452 (size=503880) 2024-12-13T07:35:37,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742277_1453 (size=4695811) 2024-12-13T07:35:37,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742277_1453 (size=4695811) 2024-12-13T07:35:37,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742277_1453 (size=4695811) 2024-12-13T07:35:37,634 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
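
[Editor's aside, not part of the captured log] The JobResourceUploader warning just above ("No job jar file set. User classes may not be found. See Job or Job#setJar(String).") points at the MapReduce Job API for supplying a job jar. A minimal sketch of the two usual calls it refers to; the class and jar path below are placeholders, not taken from this test run:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Job;

  public class JobJarExample {
    public static void main(String[] args) throws Exception {
      // Create a job; without one of the calls below, task JVMs may not find user classes.
      Job job = Job.getInstance(new Configuration(), "example-job");
      // Infer the jar from a class contained in it...
      job.setJarByClass(JobJarExample.class);
      // ...or point at the jar explicitly (hypothetical path):
      // job.setJar("/path/to/my-app.jar");
    }
  }

In this log the warning is expected, since the test submits the ExportSnapshot job from unpacked test classes rather than a packaged jar.
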
2024-12-13T07:35:37,636 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-13T07:35:37,638 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T07:35:37,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742278_1454 (size=338) 2024-12-13T07:35:37,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742278_1454 (size=338) 2024-12-13T07:35:37,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742278_1454 (size=338) 2024-12-13T07:35:37,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742279_1455 (size=15) 2024-12-13T07:35:37,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742279_1455 (size=15) 2024-12-13T07:35:37,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742279_1455 (size=15) 2024-12-13T07:35:37,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742280_1456 (size=304929) 2024-12-13T07:35:37,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742280_1456 (size=304929) 2024-12-13T07:35:37,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742280_1456 (size=304929) 2024-12-13T07:35:38,805 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:35:38,805 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T07:35:38,808 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0007_000001 (auth:SIMPLE) from 127.0.0.1:34900 2024-12-13T07:35:38,816 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_0/usercache/jenkins/appcache/application_1734075189398_0007/container_1734075189398_0007_01_000001/launch_container.sh] 2024-12-13T07:35:38,816 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_0/usercache/jenkins/appcache/application_1734075189398_0007/container_1734075189398_0007_01_000001/container_tokens] 2024-12-13T07:35:38,816 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_0/usercache/jenkins/appcache/application_1734075189398_0007/container_1734075189398_0007_01_000001/sysfs] 2024-12-13T07:35:39,683 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0008_000001 (auth:SIMPLE) from 127.0.0.1:39468 2024-12-13T07:35:42,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-13T07:35:42,021 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-13T07:35:42,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-13T07:35:44,719 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0008_000001 (auth:SIMPLE) from 127.0.0.1:43584 2024-12-13T07:35:44,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742281_1457 (size=350603) 2024-12-13T07:35:44,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742281_1457 (size=350603) 2024-12-13T07:35:45,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742281_1457 (size=350603) 2024-12-13T07:35:46,936 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0008_000001 (auth:SIMPLE) from 127.0.0.1:35704 
2024-12-13T07:35:47,524 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:35:50,086 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_1/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000002/launch_container.sh] 2024-12-13T07:35:50,086 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_1/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000002/container_tokens] 2024-12-13T07:35:50,086 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_1/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075336311/archive/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-13T07:35:51,812 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0008_000001 (auth:SIMPLE) from 127.0.0.1:49886 2024-12-13T07:35:54,174 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000003/launch_container.sh] 2024-12-13T07:35:54,174 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000003/container_tokens] 2024-12-13T07:35:54,174 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075336311/archive/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. 
Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-13T07:35:55,579 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region aa4b6ee25986a85d04a0682e5b7b9bda, had cached 0 bytes from a total of 8256 2024-12-13T07:35:55,579 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 9e44ea00e032406763f2d24d62083e47, had cached 0 bytes from a total of 5356 2024-12-13T07:35:55,826 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0008_000001 (auth:SIMPLE) from 127.0.0.1:49894 2024-12-13T07:35:58,279 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_3/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000004/launch_container.sh] 2024-12-13T07:35:58,279 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_3/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000004/container_tokens] 2024-12-13T07:35:58,279 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_3/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/local-export-1734075336311/archive/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-13T07:35:59,833 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0008_000001 (auth:SIMPLE) from 127.0.0.1:49900 2024-12-13T07:36:00,631 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
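Editor's note: the repeated IOException above comes from ExportSnapshot's post-copy verification. The source is an hdfs:// URI and the destination a local file:/ URI, so their block-level checksum algorithms cannot be compared, and the message itself names two remedies: file-level CRCs via dfs.checksum.combine.mode=COMPOSITE_CRC, or skipping verification with -no-checksum-verify. Below is a minimal sketch of driving such an export through ToolRunner (the same entry point visible in the failing mapper's stack trace) with those options. The destination path is an illustrative placeholder; the flags are taken from the error text and ExportSnapshot's documented -snapshot/-copy-to usage, not from the test code itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithChecksumOptions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Option 1 (quoted in the error message): compare file-level composite CRCs,
    // which stay comparable even when source and destination filesystems use
    // different block-level checksum schemes.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");

    // Snapshot name is the one from this log; the copy-to target is an
    // illustrative placeholder, not the test's actual destination.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export"
        // Option 2 (also quoted in the error message): skip verification entirely.
        // , "-no-checksum-verify"
    });
    System.exit(rc);
  }
}
```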
2024-12-13T07:36:02,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742282_1458 (size=21340) 2024-12-13T07:36:02,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742282_1458 (size=21340) 2024-12-13T07:36:02,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742282_1458 (size=21340) 2024-12-13T07:36:02,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742283_1459 (size=460) 2024-12-13T07:36:02,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742283_1459 (size=460) 2024-12-13T07:36:02,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742283_1459 (size=460) 2024-12-13T07:36:02,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742284_1460 (size=21340) 2024-12-13T07:36:02,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742284_1460 (size=21340) 2024-12-13T07:36:02,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742284_1460 (size=21340) 2024-12-13T07:36:02,287 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000005/launch_container.sh] 2024-12-13T07:36:02,288 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000005/container_tokens] 2024-12-13T07:36:02,288 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_2/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000005/sysfs] 2024-12-13T07:36:02,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742285_1461 (size=350603) 2024-12-13T07:36:02,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742285_1461 (size=350603) 2024-12-13T07:36:02,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742285_1461 (size=350603) 2024-12-13T07:36:02,334 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0008_000001 (auth:SIMPLE) from 127.0.0.1:49502 2024-12-13T07:36:03,875 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1227): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1734075189398_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:935) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1204) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:353) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T07:36:03,876 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075363876 2024-12-13T07:36:03,876 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41477, tgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075363876, rawTgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075363876, srcFsUri=hdfs://localhost:41477, srcDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:36:03,909 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41477, inputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:36:03,909 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075363876, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075363876/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-13T07:36:03,910 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
2024-12-13T07:36:03,914 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075363876/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-13T07:36:03,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742286_1462 (size=156) 2024-12-13T07:36:03,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742286_1462 (size=156) 2024-12-13T07:36:03,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742286_1462 (size=156) 2024-12-13T07:36:03,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742287_1463 (size=621) 2024-12-13T07:36:03,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742287_1463 (size=621) 2024-12-13T07:36:03,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742287_1463 (size=621) 2024-12-13T07:36:03,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:03,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:03,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:03,928 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:04,771 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-14662785636569137317.jar 2024-12-13T07:36:04,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:04,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:04,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-10083553671374184843.jar 2024-12-13T07:36:04,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:04,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:04,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:04,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:04,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:04,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:04,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T07:36:04,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T07:36:04,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T07:36:04,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T07:36:04,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 
2024-12-13T07:36:04,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T07:36:04,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T07:36:04,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T07:36:04,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T07:36:04,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T07:36:04,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T07:36:04,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T07:36:04,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:36:04,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:36:04,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:36:04,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:36:04,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-13T07:36:04,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:36:04,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:36:04,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742288_1464 (size=451756) 2024-12-13T07:36:04,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742288_1464 (size=451756) 2024-12-13T07:36:04,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742288_1464 (size=451756) 2024-12-13T07:36:04,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742289_1465 (size=127628) 2024-12-13T07:36:04,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742289_1465 (size=127628) 2024-12-13T07:36:04,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742289_1465 (size=127628) 2024-12-13T07:36:04,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742290_1466 (size=2172137) 2024-12-13T07:36:04,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742290_1466 (size=2172137) 2024-12-13T07:36:04,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742290_1466 (size=2172137) 2024-12-13T07:36:04,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742291_1467 (size=213228) 2024-12-13T07:36:04,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742291_1467 (size=213228) 2024-12-13T07:36:04,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742291_1467 (size=213228) 2024-12-13T07:36:04,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742292_1468 (size=1877034) 2024-12-13T07:36:04,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742292_1468 (size=1877034) 2024-12-13T07:36:04,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742292_1468 (size=1877034) 2024-12-13T07:36:04,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742293_1469 (size=533455) 
2024-12-13T07:36:04,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742293_1469 (size=533455) 2024-12-13T07:36:04,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742293_1469 (size=533455) 2024-12-13T07:36:04,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742294_1470 (size=7280644) 2024-12-13T07:36:04,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742294_1470 (size=7280644) 2024-12-13T07:36:04,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742294_1470 (size=7280644) 2024-12-13T07:36:04,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742295_1471 (size=4188619) 2024-12-13T07:36:04,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742295_1471 (size=4188619) 2024-12-13T07:36:04,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742295_1471 (size=4188619) 2024-12-13T07:36:04,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742296_1472 (size=20406) 2024-12-13T07:36:04,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742296_1472 (size=20406) 2024-12-13T07:36:04,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742296_1472 (size=20406) 2024-12-13T07:36:04,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742297_1473 (size=75495) 2024-12-13T07:36:04,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742297_1473 (size=75495) 2024-12-13T07:36:04,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742297_1473 (size=75495) 2024-12-13T07:36:04,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742298_1474 (size=45609) 2024-12-13T07:36:04,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742298_1474 (size=45609) 2024-12-13T07:36:04,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742298_1474 (size=45609) 2024-12-13T07:36:04,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742299_1475 (size=110084) 2024-12-13T07:36:04,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742299_1475 (size=110084) 2024-12-13T07:36:04,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742299_1475 
(size=110084) 2024-12-13T07:36:04,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742300_1476 (size=1323991) 2024-12-13T07:36:04,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742300_1476 (size=1323991) 2024-12-13T07:36:04,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742300_1476 (size=1323991) 2024-12-13T07:36:04,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742301_1477 (size=23076) 2024-12-13T07:36:04,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742301_1477 (size=23076) 2024-12-13T07:36:04,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742301_1477 (size=23076) 2024-12-13T07:36:04,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742302_1478 (size=126803) 2024-12-13T07:36:04,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742302_1478 (size=126803) 2024-12-13T07:36:04,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742302_1478 (size=126803) 2024-12-13T07:36:04,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742303_1479 (size=322274) 2024-12-13T07:36:04,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742303_1479 (size=322274) 2024-12-13T07:36:04,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742303_1479 (size=322274) 2024-12-13T07:36:04,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742304_1480 (size=1832290) 2024-12-13T07:36:04,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742304_1480 (size=1832290) 2024-12-13T07:36:04,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742304_1480 (size=1832290) 2024-12-13T07:36:05,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742305_1481 (size=30081) 2024-12-13T07:36:05,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742305_1481 (size=30081) 2024-12-13T07:36:05,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742305_1481 (size=30081) 2024-12-13T07:36:05,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742306_1482 (size=53616) 2024-12-13T07:36:05,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to 
blk_1073742306_1482 (size=53616) 2024-12-13T07:36:05,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742306_1482 (size=53616) 2024-12-13T07:36:05,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742307_1483 (size=29229) 2024-12-13T07:36:05,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742307_1483 (size=29229) 2024-12-13T07:36:05,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742307_1483 (size=29229) 2024-12-13T07:36:05,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742308_1484 (size=169089) 2024-12-13T07:36:05,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742308_1484 (size=169089) 2024-12-13T07:36:05,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742308_1484 (size=169089) 2024-12-13T07:36:05,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742309_1485 (size=5175431) 2024-12-13T07:36:05,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742309_1485 (size=5175431) 2024-12-13T07:36:05,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742309_1485 (size=5175431) 2024-12-13T07:36:05,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742310_1486 (size=136454) 2024-12-13T07:36:05,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742310_1486 (size=136454) 2024-12-13T07:36:05,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742310_1486 (size=136454) 2024-12-13T07:36:05,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742311_1487 (size=6350899) 2024-12-13T07:36:05,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742311_1487 (size=6350899) 2024-12-13T07:36:05,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742311_1487 (size=6350899) 2024-12-13T07:36:05,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742312_1488 (size=907854) 2024-12-13T07:36:05,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742312_1488 (size=907854) 2024-12-13T07:36:05,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742312_1488 (size=907854) 2024-12-13T07:36:05,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is 
added to blk_1073742313_1489 (size=3317408) 2024-12-13T07:36:05,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742313_1489 (size=3317408) 2024-12-13T07:36:05,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742313_1489 (size=3317408) 2024-12-13T07:36:05,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742314_1490 (size=503880) 2024-12-13T07:36:05,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742314_1490 (size=503880) 2024-12-13T07:36:05,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742314_1490 (size=503880) 2024-12-13T07:36:05,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742315_1491 (size=4695811) 2024-12-13T07:36:05,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742315_1491 (size=4695811) 2024-12-13T07:36:05,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742315_1491 (size=4695811) 2024-12-13T07:36:05,104 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-13T07:36:05,106 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-13T07:36:05,107 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T07:36:05,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742316_1492 (size=338) 2024-12-13T07:36:05,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742316_1492 (size=338) 2024-12-13T07:36:05,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742316_1492 (size=338) 2024-12-13T07:36:05,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742317_1493 (size=15) 2024-12-13T07:36:05,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742317_1493 (size=15) 2024-12-13T07:36:05,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742317_1493 (size=15) 2024-12-13T07:36:05,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742318_1494 (size=304883) 2024-12-13T07:36:05,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742318_1494 (size=304883) 2024-12-13T07:36:05,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742318_1494 (size=304883) 2024-12-13T07:36:07,276 DEBUG [master/9eb75de94ef3:0.Chore.1 {}] 
balancer.RegionLocationFinder(172): Locality for region 5025487cefbea8591ac9d65826ff59af changed from -1.0 to 0.0, refreshing cache 2024-12-13T07:36:07,276 DEBUG [master/9eb75de94ef3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region aa4b6ee25986a85d04a0682e5b7b9bda changed from -1.0 to 0.0, refreshing cache 2024-12-13T07:36:07,276 DEBUG [master/9eb75de94ef3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 9e44ea00e032406763f2d24d62083e47 changed from -1.0 to 0.0, refreshing cache 2024-12-13T07:36:07,276 DEBUG [master/9eb75de94ef3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region dbfbadb834b88850238be50b11c2116b changed from -1.0 to 0.0, refreshing cache 2024-12-13T07:36:08,393 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:36:08,393 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:36:08,396 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0008_000001 (auth:SIMPLE) from 127.0.0.1:52078 2024-12-13T07:36:08,406 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_0/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000001/launch_container.sh] 2024-12-13T07:36:08,406 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_0/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000001/container_tokens] 2024-12-13T07:36:08,406 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_0/usercache/jenkins/appcache/application_1734075189398_0008/container_1734075189398_0008_01_000001/sysfs] 2024-12-13T07:36:09,275 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0009_000001 (auth:SIMPLE) from 127.0.0.1:49514 2024-12-13T07:36:14,284 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0009_000001 (auth:SIMPLE) from 127.0.0.1:39730 2024-12-13T07:36:14,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742319_1495 (size=350557) 
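Editor's note: the JobResourceUploader warning a few entries above ("No job jar file set. User classes may not be found. See Job or Job#setJar(String).") refers to the standard MapReduce Job API. A minimal sketch, with an illustrative class name and jar path, of how a job normally declares its jar so task JVMs can load user classes; the test's export jobs simply proceed without one here.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "example-job");

    // Either point at the jar containing the job classes directly ...
    // job.setJar("/path/to/my-job.jar");   // illustrative path

    // ... or, more commonly, let Hadoop resolve the jar from a class it
    // contains, which avoids the "No job jar file set" warning seen above.
    job.setJarByClass(JobJarExample.class);

    // getJar() may still be null when classes are loaded from a directory
    // rather than a packaged jar, as in this mini-cluster test run.
    System.out.println("Resolved job jar: " + job.getJar());
  }
}
```

The capacity-scheduler warnings logged at 07:36:08 above are related mini-cluster tuning: yarn.scheduler.capacity.maximum-am-resource-percent caps the share of a queue that ApplicationMasters may consume, and the scheduler explicitly skips enforcement here so that at least one application can start.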
2024-12-13T07:36:14,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742319_1495 (size=350557) 2024-12-13T07:36:14,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742319_1495 (size=350557) 2024-12-13T07:36:16,520 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0009_000001 (auth:SIMPLE) from 127.0.0.1:46602 2024-12-13T07:36:19,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742320_1496 (size=8188) 2024-12-13T07:36:19,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742320_1496 (size=8188) 2024-12-13T07:36:19,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742320_1496 (size=8188) 2024-12-13T07:36:19,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742321_1497 (size=5422) 2024-12-13T07:36:19,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742321_1497 (size=5422) 2024-12-13T07:36:19,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742321_1497 (size=5422) 2024-12-13T07:36:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742322_1498 (size=17413) 2024-12-13T07:36:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742322_1498 (size=17413) 2024-12-13T07:36:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742322_1498 (size=17413) 2024-12-13T07:36:19,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742323_1499 (size=462) 2024-12-13T07:36:19,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742323_1499 (size=462) 2024-12-13T07:36:19,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742323_1499 (size=462) 2024-12-13T07:36:20,002 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0009/container_1734075189398_0009_01_000002/launch_container.sh] 2024-12-13T07:36:20,003 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0009/container_1734075189398_0009_01_000002/container_tokens] 2024-12-13T07:36:20,003 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0009/container_1734075189398_0009_01_000002/sysfs] 2024-12-13T07:36:20,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742324_1500 (size=17413) 2024-12-13T07:36:20,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742324_1500 (size=17413) 2024-12-13T07:36:20,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742324_1500 (size=17413) 2024-12-13T07:36:20,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742325_1501 (size=350557) 2024-12-13T07:36:20,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742325_1501 (size=350557) 2024-12-13T07:36:20,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742325_1501 (size=350557) 2024-12-13T07:36:20,104 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0009_000001 (auth:SIMPLE) from 127.0.0.1:37936 2024-12-13T07:36:20,273 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region dbfbadb834b88850238be50b11c2116b, had cached 0 bytes from a total of 8188 2024-12-13T07:36:20,274 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 5025487cefbea8591ac9d65826ff59af, had cached 0 bytes from a total of 5422 2024-12-13T07:36:21,289 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T07:36:21,291 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-13T07:36:21,296 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportWithChecksum 2024-12-13T07:36:21,296 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T07:36:21,296 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T07:36:21,296 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-13T07:36:21,297 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-13T07:36:21,297 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-13T07:36:21,297 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075363876/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075363876/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-13T07:36:21,297 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075363876/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-13T07:36:21,297 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075363876/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-13T07:36:21,302 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithChecksum 2024-12-13T07:36:21,302 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-13T07:36:21,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-13T07:36:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-13T07:36:21,304 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075381304"}]},"ts":"1734075381304"} 2024-12-13T07:36:21,305 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-13T07:36:21,349 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithChecksum to state=DISABLING 2024-12-13T07:36:21,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-13T07:36:21,352 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5025487cefbea8591ac9d65826ff59af, UNASSIGN}, {pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dbfbadb834b88850238be50b11c2116b, UNASSIGN}] 2024-12-13T07:36:21,353 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5025487cefbea8591ac9d65826ff59af, UNASSIGN 2024-12-13T07:36:21,353 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dbfbadb834b88850238be50b11c2116b, UNASSIGN 2024-12-13T07:36:21,353 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=5025487cefbea8591ac9d65826ff59af, regionState=CLOSING, regionLocation=9eb75de94ef3,36021,1734075182342 2024-12-13T07:36:21,353 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=dbfbadb834b88850238be50b11c2116b, regionState=CLOSING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:36:21,355 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:36:21,355 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=CLOSING, location=9eb75de94ef3,42189,1734075182526, table=testtb-testExportWithChecksum, region=dbfbadb834b88850238be50b11c2116b. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-13T07:36:21,355 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=195, ppid=194, state=RUNNABLE; CloseRegionProcedure dbfbadb834b88850238be50b11c2116b, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:36:21,355 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:36:21,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=193, state=RUNNABLE; CloseRegionProcedure 5025487cefbea8591ac9d65826ff59af, server=9eb75de94ef3,36021,1734075182342}] 2024-12-13T07:36:21,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-13T07:36:21,507 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:36:21,507 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(124): Close dbfbadb834b88850238be50b11c2116b 2024-12-13T07:36:21,507 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:36:21,508 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1681): Closing dbfbadb834b88850238be50b11c2116b, disabling compactions & flushes 2024-12-13T07:36:21,508 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,36021,1734075182342 2024-12-13T07:36:21,508 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:36:21,508 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 2024-12-13T07:36:21,508 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. after waiting 0 ms 2024-12-13T07:36:21,508 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 
2024-12-13T07:36:21,508 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(124): Close 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:36:21,508 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:36:21,508 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1681): Closing 5025487cefbea8591ac9d65826ff59af, disabling compactions & flushes 2024-12-13T07:36:21,508 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:36:21,508 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:36:21,508 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. after waiting 0 ms 2024-12-13T07:36:21,508 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:36:21,519 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:36:21,519 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:36:21,520 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:36:21,520 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:36:21,520 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af. 2024-12-13T07:36:21,520 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b. 
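The region closes above are the server side of the DisableTableProcedure (pid=191) that the master stored for the client's disable request. From the client this is a single blocking Admin call; a minimal sketch, with the class name purely illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master's DisableTableProcedure and its
          // CloseTableRegionsProcedure / CloseRegionProcedure children complete.
          admin.disableTable(TableName.valueOf("testtb-testExportWithChecksum"));
        }
      }
    }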
2024-12-13T07:36:21,520 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1635): Region close journal for 5025487cefbea8591ac9d65826ff59af: 2024-12-13T07:36:21,520 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1635): Region close journal for dbfbadb834b88850238be50b11c2116b: 2024-12-13T07:36:21,521 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(170): Closed dbfbadb834b88850238be50b11c2116b 2024-12-13T07:36:21,522 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(170): Closed 5025487cefbea8591ac9d65826ff59af 2024-12-13T07:36:21,522 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=dbfbadb834b88850238be50b11c2116b, regionState=CLOSED 2024-12-13T07:36:21,522 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=5025487cefbea8591ac9d65826ff59af, regionState=CLOSED 2024-12-13T07:36:21,524 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=195, resume processing ppid=194 2024-12-13T07:36:21,525 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=193 2024-12-13T07:36:21,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, ppid=194, state=SUCCESS; CloseRegionProcedure dbfbadb834b88850238be50b11c2116b, server=9eb75de94ef3,42189,1734075182526 in 167 msec 2024-12-13T07:36:21,525 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=193, state=SUCCESS; CloseRegionProcedure 5025487cefbea8591ac9d65826ff59af, server=9eb75de94ef3,36021,1734075182342 in 167 msec 2024-12-13T07:36:21,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dbfbadb834b88850238be50b11c2116b, UNASSIGN in 172 msec 2024-12-13T07:36:21,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=193, resume processing ppid=192 2024-12-13T07:36:21,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5025487cefbea8591ac9d65826ff59af, UNASSIGN in 173 msec 2024-12-13T07:36:21,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-13T07:36:21,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 180 msec 2024-12-13T07:36:21,538 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075381537"}]},"ts":"1734075381537"} 2024-12-13T07:36:21,539 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-13T07:36:21,549 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-13T07:36:21,551 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithChecksum 
in 248 msec 2024-12-13T07:36:21,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-13T07:36:21,606 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum, procId: 191 completed 2024-12-13T07:36:21,607 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-13T07:36:21,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T07:36:21,608 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T07:36:21,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-13T07:36:21,609 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T07:36:21,611 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af 2024-12-13T07:36:21,611 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b 2024-12-13T07:36:21,612 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-13T07:36:21,613 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/recovered.edits] 2024-12-13T07:36:21,613 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/recovered.edits] 2024-12-13T07:36:21,616 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce to 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/cf/b0062945bfe44cc8b6890c633a4a1cce 2024-12-13T07:36:21,616 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/cf/ae5ef50022b44882800fc62a2c81e4f0 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/cf/ae5ef50022b44882800fc62a2c81e4f0 2024-12-13T07:36:21,618 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b/recovered.edits/9.seqid 2024-12-13T07:36:21,618 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af/recovered.edits/9.seqid 2024-12-13T07:36:21,618 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/dbfbadb834b88850238be50b11c2116b 2024-12-13T07:36:21,618 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportWithChecksum/5025487cefbea8591ac9d65826ff59af 2024-12-13T07:36:21,618 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-13T07:36:21,620 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T07:36:21,622 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-13T07:36:21,624 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-13T07:36:21,625 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T07:36:21,625 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithChecksum' from region states. 
2024-12-13T07:36:21,625 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075381625"}]},"ts":"9223372036854775807"} 2024-12-13T07:36:21,625 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075381625"}]},"ts":"9223372036854775807"} 2024-12-13T07:36:21,627 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T07:36:21,627 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5025487cefbea8591ac9d65826ff59af, NAME => 'testtb-testExportWithChecksum,,1734075334920.5025487cefbea8591ac9d65826ff59af.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => dbfbadb834b88850238be50b11c2116b, NAME => 'testtb-testExportWithChecksum,1,1734075334920.dbfbadb834b88850238be50b11c2116b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T07:36:21,627 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-13T07:36:21,627 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734075381627"}]},"ts":"9223372036854775807"} 2024-12-13T07:36:21,629 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithChecksum state from META 2024-12-13T07:36:21,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T07:36:21,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T07:36:21,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T07:36:21,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T07:36:21,641 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-13T07:36:21,641 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-13T07:36:21,642 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T07:36:21,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithChecksum in 35 msec 
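With both regions closed, the DeleteTableProcedure above archives the region directories under /archive, removes the table's rows from hbase:meta and clears its ACL entry; the test then also drops its two snapshots (the "delete name: ..." requests a few entries below). A sketch of the equivalent client-side cleanup, assuming the same try-with-resources Connection and Admin setup as in the disable sketch earlier:

    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    // deleteTable requires the table to be disabled first (pid=191 above);
    // region data is moved under /archive rather than deleted outright.
    admin.deleteTable(table);
    // Drop the snapshots taken for this test case.
    admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
    admin.deleteSnapshot("snaptb0-testExportWithChecksum");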
2024-12-13T07:36:21,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:21,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T07:36:21,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T07:36:21,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:21,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:21,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-12-13T07:36:21,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-12-13T07:36:21,649 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-13T07:36:21,649 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-13T07:36:21,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:21,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-13T07:36:21,650 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:21,650 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:21,650 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:21,651 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum, procId: 197 completed 2024-12-13T07:36:21,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:21,657 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" 2024-12-13T07:36:21,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-13T07:36:21,660 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" 2024-12-13T07:36:21,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-13T07:36:21,680 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=811 (was 812), OpenFileDescriptor=820 (was 823), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=386 (was 440), ProcessCount=17 (was 17), AvailableMemoryMB=5919 (was 6315) 2024-12-13T07:36:21,680 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-13T07:36:21,695 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=811, OpenFileDescriptor=820, MaxFileDescriptor=1048576, SystemLoadAverage=386, ProcessCount=17, AvailableMemoryMB=5918 2024-12-13T07:36:21,695 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-13T07:36:21,697 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T07:36:21,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:21,698 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T07:36:21,698 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 198 2024-12-13T07:36:21,698 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:36:21,698 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute 
state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T07:36:21,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-13T07:36:21,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742326_1502 (size=418) 2024-12-13T07:36:21,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742326_1502 (size=418) 2024-12-13T07:36:21,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742326_1502 (size=418) 2024-12-13T07:36:21,708 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 66b14d577f845e3d69fc6ad302fa5d68, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:36:21,709 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 74db0d255f39256144c82caec65ee664, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:36:21,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742327_1503 (size=79) 2024-12-13T07:36:21,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742327_1503 (size=79) 2024-12-13T07:36:21,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742327_1503 (size=79) 2024-12-13T07:36:21,723 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:36:21,723 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1681): 
Closing 74db0d255f39256144c82caec65ee664, disabling compactions & flushes 2024-12-13T07:36:21,723 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 2024-12-13T07:36:21,723 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 2024-12-13T07:36:21,724 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. after waiting 0 ms 2024-12-13T07:36:21,724 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 2024-12-13T07:36:21,724 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 2024-12-13T07:36:21,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742328_1504 (size=79) 2024-12-13T07:36:21,724 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1635): Region close journal for 74db0d255f39256144c82caec65ee664: 2024-12-13T07:36:21,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742328_1504 (size=79) 2024-12-13T07:36:21,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742328_1504 (size=79) 2024-12-13T07:36:21,724 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:36:21,724 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1681): Closing 66b14d577f845e3d69fc6ad302fa5d68, disabling compactions & flushes 2024-12-13T07:36:21,724 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:21,724 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:21,725 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 
after waiting 0 ms 2024-12-13T07:36:21,725 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:21,725 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:21,725 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1635): Region close journal for 66b14d577f845e3d69fc6ad302fa5d68: 2024-12-13T07:36:21,725 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T07:36:21,725 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734075381725"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075381725"}]},"ts":"1734075381725"} 2024-12-13T07:36:21,726 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734075381725"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734075381725"}]},"ts":"1734075381725"} 2024-12-13T07:36:21,728 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
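The two "creating {ENCODED => ...}" blocks and "Added 2 regions to meta" are CreateTableProcedure laying out the table requested at 07:36:21,697: a single 'cf' family and a split at row key '1', giving regions 66b14d577f845e3d69fc6ad302fa5d68 and 74db0d255f39256144c82caec65ee664. A client-side sketch of an equivalent create, keeping only the non-default attribute (VERSIONS => '1') from the logged descriptor; the class name is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
                  .build())
              .build();
          // Pre-split at row key '1' so the table starts with two regions,
          // matching the STARTKEY/ENDKEY pairs in the log.
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }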
2024-12-13T07:36:21,729 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T07:36:21,729 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075381729"}]},"ts":"1734075381729"} 2024-12-13T07:36:21,730 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-13T07:36:21,775 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {9eb75de94ef3=0} racks are {/default-rack=0} 2024-12-13T07:36:21,776 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T07:36:21,776 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T07:36:21,776 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T07:36:21,776 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T07:36:21,776 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T07:36:21,776 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T07:36:21,776 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T07:36:21,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=66b14d577f845e3d69fc6ad302fa5d68, ASSIGN}, {pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=74db0d255f39256144c82caec65ee664, ASSIGN}] 2024-12-13T07:36:21,777 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=74db0d255f39256144c82caec65ee664, ASSIGN 2024-12-13T07:36:21,777 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=66b14d577f845e3d69fc6ad302fa5d68, ASSIGN 2024-12-13T07:36:21,777 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=74db0d255f39256144c82caec65ee664, ASSIGN; state=OFFLINE, location=9eb75de94ef3,42189,1734075182526; forceNewPlan=false, retain=false 2024-12-13T07:36:21,777 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=66b14d577f845e3d69fc6ad302fa5d68, ASSIGN; state=OFFLINE, location=9eb75de94ef3,41895,1734075182430; forceNewPlan=false, retain=false 
2024-12-13T07:36:21,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-13T07:36:21,928 INFO [9eb75de94ef3:33713 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T07:36:21,928 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=74db0d255f39256144c82caec65ee664, regionState=OPENING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:36:21,928 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=66b14d577f845e3d69fc6ad302fa5d68, regionState=OPENING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:36:21,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE; OpenRegionProcedure 74db0d255f39256144c82caec65ee664, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:36:21,933 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=202, ppid=199, state=RUNNABLE; OpenRegionProcedure 66b14d577f845e3d69fc6ad302fa5d68, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:36:22,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-13T07:36:22,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-13T07:36:22,085 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:36:22,086 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:36:22,092 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 2024-12-13T07:36:22,092 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7285): Opening region: {ENCODED => 74db0d255f39256144c82caec65ee664, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T07:36:22,093 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. service=AccessControlService 2024-12-13T07:36:22,093 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T07:36:22,094 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,094 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:36:22,094 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:22,094 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7327): checking encryption for 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,094 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7285): Opening region: {ENCODED => 66b14d577f845e3d69fc6ad302fa5d68, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T07:36:22,094 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7330): checking classloading for 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,095 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. service=AccessControlService 2024-12-13T07:36:22,095 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
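Both open-region handlers register the AccessControlService coprocessor because the cluster runs with authorization enabled. The actual hbase-site.xml of this test cluster is not shown in this section, so the property list below is only an assumption about the kind of configuration that produces these "System coprocessor ... AccessController loaded" entries:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SecureClusterConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.security.authorization", "true");
        // Load AccessController as a system coprocessor on the master, on each
        // region server and in every region; priority=536870911 in the entries
        // above is the system-coprocessor priority.
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
      }
    }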
2024-12-13T07:36:22,096 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,096 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T07:36:22,096 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7327): checking encryption for 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,096 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7330): checking classloading for 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,097 INFO [StoreOpener-74db0d255f39256144c82caec65ee664-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,098 INFO [StoreOpener-66b14d577f845e3d69fc6ad302fa5d68-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,098 INFO [StoreOpener-74db0d255f39256144c82caec65ee664-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 74db0d255f39256144c82caec65ee664 columnFamilyName cf 2024-12-13T07:36:22,099 INFO [StoreOpener-66b14d577f845e3d69fc6ad302fa5d68-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 66b14d577f845e3d69fc6ad302fa5d68 columnFamilyName cf 2024-12-13T07:36:22,099 DEBUG [StoreOpener-66b14d577f845e3d69fc6ad302fa5d68-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:36:22,099 DEBUG [StoreOpener-74db0d255f39256144c82caec65ee664-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T07:36:22,099 INFO [StoreOpener-66b14d577f845e3d69fc6ad302fa5d68-1 {}] regionserver.HStore(327): Store=66b14d577f845e3d69fc6ad302fa5d68/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:36:22,099 INFO [StoreOpener-74db0d255f39256144c82caec65ee664-1 {}] regionserver.HStore(327): Store=74db0d255f39256144c82caec65ee664/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T07:36:22,100 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,100 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,100 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,100 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,102 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1085): writing seq id for 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,102 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1085): writing seq id for 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,104 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:36:22,103 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T07:36:22,104 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 
{event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1102): Opened 66b14d577f845e3d69fc6ad302fa5d68; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65145591, jitterRate=-0.02925504744052887}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:36:22,104 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1102): Opened 74db0d255f39256144c82caec65ee664; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63825296, jitterRate=-0.04892897605895996}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T07:36:22,104 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1001): Region open journal for 66b14d577f845e3d69fc6ad302fa5d68: 2024-12-13T07:36:22,104 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1001): Region open journal for 74db0d255f39256144c82caec65ee664: 2024-12-13T07:36:22,105 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68., pid=202, masterSystemTime=1734075382086 2024-12-13T07:36:22,105 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664., pid=201, masterSystemTime=1734075382085 2024-12-13T07:36:22,106 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:22,106 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:22,106 DEBUG [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 2024-12-13T07:36:22,107 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=66b14d577f845e3d69fc6ad302fa5d68, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:36:22,107 INFO [RS_OPEN_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 
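Once both regions report "Opened", the test waits for the assignment to settle (the "Waiting until all regions ... get assigned. Timeout = 60000ms" entries further down) and then requests the empty FLUSH snapshot. A fragment sketching that step; util is an assumed HBaseTestingUtility instance and admin an Admin handle as in the earlier sketches:

    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    // Wait up to 60s for both regions to be assigned and visible in hbase:meta.
    util.waitUntilAllRegionsAssigned(table, 60000);
    // Take the "empty" snapshot before any data is loaded; a FLUSH-type snapshot
    // flushes memstores first, which is a no-op for a freshly created table.
    admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp", table);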
2024-12-13T07:36:22,107 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=74db0d255f39256144c82caec65ee664, regionState=OPEN, openSeqNum=2, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:36:22,109 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=202, resume processing ppid=199 2024-12-13T07:36:22,109 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=202, ppid=199, state=SUCCESS; OpenRegionProcedure 66b14d577f845e3d69fc6ad302fa5d68, server=9eb75de94ef3,41895,1734075182430 in 175 msec 2024-12-13T07:36:22,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=201, resume processing ppid=200 2024-12-13T07:36:22,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=201, ppid=200, state=SUCCESS; OpenRegionProcedure 74db0d255f39256144c82caec65ee664, server=9eb75de94ef3,42189,1734075182526 in 177 msec 2024-12-13T07:36:22,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=66b14d577f845e3d69fc6ad302fa5d68, ASSIGN in 333 msec 2024-12-13T07:36:22,110 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=200, resume processing ppid=198 2024-12-13T07:36:22,110 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=200, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=74db0d255f39256144c82caec65ee664, ASSIGN in 333 msec 2024-12-13T07:36:22,111 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T07:36:22,111 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075382111"}]},"ts":"1734075382111"} 2024-12-13T07:36:22,112 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-13T07:36:22,116 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T07:36:22,117 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-13T07:36:22,118 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-13T07:36:22,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:22,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:22,147 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:22,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:22,158 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:22,158 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:22,158 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:22,158 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:22,158 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:22,158 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:22,158 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:22,158 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:22,159 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 461 msec 2024-12-13T07:36:22,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-13T07:36:22,304 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 198 completed 2024-12-13T07:36:22,304 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table 
testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-13T07:36:22,305 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:36:22,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42189 {}] regionserver.StoreScanner(1133): Switch to stream read (scanned=32795 bytes) of info 2024-12-13T07:36:22,315 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-13T07:36:22,315 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:36:22,316 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-13T07:36:22,319 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-13T07:36:22,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075382319 (current time:1734075382319). 2024-12-13T07:36:22,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:36:22,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-13T07:36:22,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:36:22,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d76e70f to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@33b3cd67 2024-12-13T07:36:22,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f2fc579, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:36:22,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:36:22,336 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:36:22,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d76e70f to 127.0.0.1:59810 2024-12-13T07:36:22,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:36:22,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53cf75cb to 127.0.0.1:59810 with session timeout=90000ms, retries=30, 
retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e3a4220 2024-12-13T07:36:22,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fff5493, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:36:22,351 DEBUG [hconnection-0x2b9bdef9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:36:22,352 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44374, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:36:22,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:36:22,354 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51514, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:36:22,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53cf75cb to 127.0.0.1:59810 2024-12-13T07:36:22,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:36:22,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-13T07:36:22,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-13T07:36:22,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-13T07:36:22,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-13T07:36:22,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-13T07:36:22,357 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:36:22,358 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:36:22,359 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:36:22,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742329_1505 (size=203) 2024-12-13T07:36:22,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742329_1505 (size=203) 2024-12-13T07:36:22,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742329_1505 (size=203) 2024-12-13T07:36:22,365 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:36:22,365 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 66b14d577f845e3d69fc6ad302fa5d68}, {pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 74db0d255f39256144c82caec65ee664}] 2024-12-13T07:36:22,366 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,366 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 
66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-13T07:36:22,517 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:36:22,517 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:36:22,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42189 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-13T07:36:22,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-13T07:36:22,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:22,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 2024-12-13T07:36:22,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2538): Flush status journal for 74db0d255f39256144c82caec65ee664: 2024-12-13T07:36:22,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2538): Flush status journal for 66b14d577f845e3d69fc6ad302fa5d68: 2024-12-13T07:36:22,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-13T07:36:22,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-13T07:36:22,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:36:22,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:36:22,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:36:22,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T07:36:22,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742330_1506 (size=82) 2024-12-13T07:36:22,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742331_1507 (size=82) 2024-12-13T07:36:22,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742330_1506 (size=82) 2024-12-13T07:36:22,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742331_1507 (size=82) 2024-12-13T07:36:22,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742330_1506 (size=82) 2024-12-13T07:36:22,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742331_1507 (size=82) 2024-12-13T07:36:22,531 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:22,531 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-13T07:36:22,531 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 
2024-12-13T07:36:22,531 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-13T07:36:22,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=204 2024-12-13T07:36:22,531 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=205 2024-12-13T07:36:22,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,531 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,532 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,533 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=204, ppid=203, state=SUCCESS; SnapshotRegionProcedure 66b14d577f845e3d69fc6ad302fa5d68 in 167 msec 2024-12-13T07:36:22,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=205, resume processing ppid=203 2024-12-13T07:36:22,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=205, ppid=203, state=SUCCESS; SnapshotRegionProcedure 74db0d255f39256144c82caec65ee664 in 167 msec 2024-12-13T07:36:22,534 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:36:22,534 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:36:22,535 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:36:22,535 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,535 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,544 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742332_1508 (size=585) 2024-12-13T07:36:22,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742332_1508 (size=585) 2024-12-13T07:36:22,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742332_1508 (size=585) 2024-12-13T07:36:22,546 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:36:22,549 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:36:22,550 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,551 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:36:22,551 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-13T07:36:22,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=203, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 195 msec 2024-12-13T07:36:22,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-13T07:36:22,660 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 203 completed 2024-12-13T07:36:22,672 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41895 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:36:22,673 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42189 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 
with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T07:36:22,675 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,675 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:22,675 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T07:36:22,684 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-13T07:36:22,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734075382684 (current time:1734075382684). 2024-12-13T07:36:22,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T07:36:22,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-13T07:36:22,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T07:36:22,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45813f40 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f3aac73 2024-12-13T07:36:22,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d811d68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:36:22,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:36:22,693 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44388, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:36:22,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45813f40 to 127.0.0.1:59810 2024-12-13T07:36:22,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:36:22,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x23fdf282 to 127.0.0.1:59810 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@292e8bba 2024-12-13T07:36:22,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c91b9aa, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T07:36:22,710 DEBUG [hconnection-0x46b9bfb7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:36:22,712 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44394, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:36:22,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T07:36:22,714 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51520, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T07:36:22,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x23fdf282 to 127.0.0.1:59810 2024-12-13T07:36:22,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:36:22,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-13T07:36:22,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T07:36:22,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-13T07:36:22,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-13T07:36:22,718 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T07:36:22,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-13T07:36:22,719 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T07:36:22,721 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute 
state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T07:36:22,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742333_1509 (size=198) 2024-12-13T07:36:22,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742333_1509 (size=198) 2024-12-13T07:36:22,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742333_1509 (size=198) 2024-12-13T07:36:22,729 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T07:36:22,730 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 66b14d577f845e3d69fc6ad302fa5d68}, {pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 74db0d255f39256144c82caec65ee664}] 2024-12-13T07:36:22,731 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,731 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-13T07:36:22,882 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:36:22,882 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:36:22,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41895 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207 2024-12-13T07:36:22,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42189 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208 2024-12-13T07:36:22,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:22,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 
2024-12-13T07:36:22,884 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2837): Flushing 66b14d577f845e3d69fc6ad302fa5d68 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-13T07:36:22,884 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2837): Flushing 74db0d255f39256144c82caec65ee664 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-13T07:36:22,902 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/.tmp/cf/d0a5adb89c744600be1051a63ad81854 is 71, key is 0243bacc69b46e6f66102b441097317a/cf:q/1734075382672/Put/seqid=0 2024-12-13T07:36:22,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/.tmp/cf/cc2521918e904120bd34c6ed464560c2 is 71, key is 112ea84725b23fc9def074934d26edaa/cf:q/1734075382672/Put/seqid=0 2024-12-13T07:36:22,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742334_1510 (size=5354) 2024-12-13T07:36:22,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742334_1510 (size=5354) 2024-12-13T07:36:22,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742335_1511 (size=8256) 2024-12-13T07:36:22,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742334_1510 (size=5354) 2024-12-13T07:36:22,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742335_1511 (size=8256) 2024-12-13T07:36:22,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742335_1511 (size=8256) 2024-12-13T07:36:22,907 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/.tmp/cf/d0a5adb89c744600be1051a63ad81854 2024-12-13T07:36:22,908 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/.tmp/cf/cc2521918e904120bd34c6ed464560c2 2024-12-13T07:36:22,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/.tmp/cf/cc2521918e904120bd34c6ed464560c2 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/cf/cc2521918e904120bd34c6ed464560c2 2024-12-13T07:36:22,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/.tmp/cf/d0a5adb89c744600be1051a63ad81854 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/cf/d0a5adb89c744600be1051a63ad81854 2024-12-13T07:36:22,914 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/cf/d0a5adb89c744600be1051a63ad81854, entries=4, sequenceid=6, filesize=5.2 K 2024-12-13T07:36:22,915 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/cf/cc2521918e904120bd34c6ed464560c2, entries=46, sequenceid=6, filesize=8.1 K 2024-12-13T07:36:22,915 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 66b14d577f845e3d69fc6ad302fa5d68 in 31ms, sequenceid=6, compaction requested=false 2024-12-13T07:36:22,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-13T07:36:22,916 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 74db0d255f39256144c82caec65ee664 in 31ms, sequenceid=6, compaction requested=false 2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2538): Flush status journal for 66b14d577f845e3d69fc6ad302fa5d68: 2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 
2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2538): Flush status journal for 74db0d255f39256144c82caec65ee664: 2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/cf/d0a5adb89c744600be1051a63ad81854] hfiles 2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/cf/d0a5adb89c744600be1051a63ad81854 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/cf/cc2521918e904120bd34c6ed464560c2] hfiles 2024-12-13T07:36:22,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/cf/cc2521918e904120bd34c6ed464560c2 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742336_1512 (size=121) 2024-12-13T07:36:22,928 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742337_1513 (size=121) 2024-12-13T07:36:22,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742336_1512 (size=121) 2024-12-13T07:36:22,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742337_1513 (size=121) 2024-12-13T07:36:22,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742337_1513 (size=121) 2024-12-13T07:36:22,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742336_1512 (size=121) 2024-12-13T07:36:22,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 2024-12-13T07:36:22,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:22,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208 2024-12-13T07:36:22,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9eb75de94ef3:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207 2024-12-13T07:36:22,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=207 2024-12-13T07:36:22,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster(4106): Remote procedure done, pid=208 2024-12-13T07:36:22,929 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,929 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,929 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:22,929 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:22,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=207, ppid=206, state=SUCCESS; SnapshotRegionProcedure 66b14d577f845e3d69fc6ad302fa5d68 in 200 msec 2024-12-13T07:36:22,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=208, resume processing ppid=206 2024-12-13T07:36:22,931 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, 
snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T07:36:22,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=208, ppid=206, state=SUCCESS; SnapshotRegionProcedure 74db0d255f39256144c82caec65ee664 in 200 msec 2024-12-13T07:36:22,931 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T07:36:22,932 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T07:36:22,932 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,932 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742338_1514 (size=663) 2024-12-13T07:36:22,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742338_1514 (size=663) 2024-12-13T07:36:22,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742338_1514 (size=663) 2024-12-13T07:36:22,944 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T07:36:22,948 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T07:36:22,949 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:22,950 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T07:36:22,950 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-13T07:36:22,950 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=206, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 234 msec 2024-12-13T07:36:23,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-13T07:36:23,022 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 206 completed 2024-12-13T07:36:23,023 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075383022 2024-12-13T07:36:23,023 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:41477, tgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075383022, rawTgtDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075383022, srcFsUri=hdfs://localhost:41477, srcDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:36:23,051 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:41477, inputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534 2024-12-13T07:36:23,051 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075383022, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075383022/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:23,052 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
2024-12-13T07:36:23,055 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075383022/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:23,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742340_1516 (size=663) 2024-12-13T07:36:23,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742339_1515 (size=198) 2024-12-13T07:36:23,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742340_1516 (size=663) 2024-12-13T07:36:23,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742339_1515 (size=198) 2024-12-13T07:36:23,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742340_1516 (size=663) 2024-12-13T07:36:23,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742339_1515 (size=198) 2024-12-13T07:36:23,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,860 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-13782047515380139629.jar 2024-12-13T07:36:23,860 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,860 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,915 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop-15967966061474631061.jar 2024-12-13T07:36:23,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T07:36:23,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T07:36:23,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T07:36:23,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T07:36:23,918 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T07:36:23,918 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T07:36:23,918 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T07:36:23,918 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T07:36:23,919 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T07:36:23,919 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T07:36:23,919 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T07:36:23,919 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T07:36:23,919 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T07:36:23,920 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:36:23,920 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:36:23,920 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:36:23,920 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:36:23,921 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T07:36:23,921 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:36:23,921 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T07:36:23,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742341_1517 (size=6350899) 2024-12-13T07:36:23,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742341_1517 (size=6350899) 2024-12-13T07:36:23,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742341_1517 (size=6350899) 2024-12-13T07:36:23,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742342_1518 (size=127628) 2024-12-13T07:36:23,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742342_1518 (size=127628) 2024-12-13T07:36:23,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742342_1518 (size=127628) 2024-12-13T07:36:24,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742343_1519 (size=2172137) 2024-12-13T07:36:24,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742343_1519 (size=2172137) 2024-12-13T07:36:24,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742343_1519 (size=2172137) 2024-12-13T07:36:24,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742344_1520 (size=213228) 2024-12-13T07:36:24,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742344_1520 (size=213228) 2024-12-13T07:36:24,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742344_1520 (size=213228) 2024-12-13T07:36:24,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742345_1521 (size=1877034) 2024-12-13T07:36:24,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742345_1521 (size=1877034) 2024-12-13T07:36:24,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742345_1521 (size=1877034) 2024-12-13T07:36:24,030 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742346_1522 (size=533455) 2024-12-13T07:36:24,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742346_1522 (size=533455) 2024-12-13T07:36:24,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742346_1522 (size=533455) 2024-12-13T07:36:24,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742347_1523 (size=7280644) 2024-12-13T07:36:24,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742347_1523 (size=7280644) 2024-12-13T07:36:24,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742347_1523 (size=7280644) 2024-12-13T07:36:24,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742348_1524 (size=4188619) 2024-12-13T07:36:24,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742348_1524 (size=4188619) 2024-12-13T07:36:24,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742348_1524 (size=4188619) 2024-12-13T07:36:24,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742349_1525 (size=20406) 2024-12-13T07:36:24,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742349_1525 (size=20406) 2024-12-13T07:36:24,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742349_1525 (size=20406) 2024-12-13T07:36:24,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742350_1526 (size=75495) 2024-12-13T07:36:24,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742350_1526 (size=75495) 2024-12-13T07:36:24,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742350_1526 (size=75495) 2024-12-13T07:36:24,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742351_1527 (size=45609) 2024-12-13T07:36:24,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742351_1527 (size=45609) 2024-12-13T07:36:24,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742351_1527 (size=45609) 2024-12-13T07:36:24,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742352_1528 (size=110084) 2024-12-13T07:36:24,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742352_1528 (size=110084) 2024-12-13T07:36:24,083 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742352_1528 (size=110084) 2024-12-13T07:36:24,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742353_1529 (size=1323991) 2024-12-13T07:36:24,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742353_1529 (size=1323991) 2024-12-13T07:36:24,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742353_1529 (size=1323991) 2024-12-13T07:36:24,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742354_1530 (size=23076) 2024-12-13T07:36:24,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742354_1530 (size=23076) 2024-12-13T07:36:24,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742354_1530 (size=23076) 2024-12-13T07:36:24,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742355_1531 (size=126803) 2024-12-13T07:36:24,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742355_1531 (size=126803) 2024-12-13T07:36:24,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742355_1531 (size=126803) 2024-12-13T07:36:24,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742356_1532 (size=322274) 2024-12-13T07:36:24,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742356_1532 (size=322274) 2024-12-13T07:36:24,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742356_1532 (size=322274) 2024-12-13T07:36:24,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742357_1533 (size=1832290) 2024-12-13T07:36:24,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742357_1533 (size=1832290) 2024-12-13T07:36:24,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742357_1533 (size=1832290) 2024-12-13T07:36:24,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742358_1534 (size=30081) 2024-12-13T07:36:24,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742358_1534 (size=30081) 2024-12-13T07:36:24,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742358_1534 (size=30081) 2024-12-13T07:36:24,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742359_1535 (size=53616) 2024-12-13T07:36:24,127 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742359_1535 (size=53616) 2024-12-13T07:36:24,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742359_1535 (size=53616) 2024-12-13T07:36:24,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742360_1536 (size=451756) 2024-12-13T07:36:24,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742360_1536 (size=451756) 2024-12-13T07:36:24,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742360_1536 (size=451756) 2024-12-13T07:36:24,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742361_1537 (size=29229) 2024-12-13T07:36:24,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742361_1537 (size=29229) 2024-12-13T07:36:24,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742361_1537 (size=29229) 2024-12-13T07:36:24,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742362_1538 (size=169089) 2024-12-13T07:36:24,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742362_1538 (size=169089) 2024-12-13T07:36:24,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742362_1538 (size=169089) 2024-12-13T07:36:24,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742363_1539 (size=5175431) 2024-12-13T07:36:24,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742363_1539 (size=5175431) 2024-12-13T07:36:24,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742363_1539 (size=5175431) 2024-12-13T07:36:24,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742364_1540 (size=136454) 2024-12-13T07:36:24,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742364_1540 (size=136454) 2024-12-13T07:36:24,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742364_1540 (size=136454) 2024-12-13T07:36:24,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742365_1541 (size=907854) 2024-12-13T07:36:24,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742365_1541 (size=907854) 2024-12-13T07:36:24,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742365_1541 (size=907854) 2024-12-13T07:36:24,186 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742366_1542 (size=3317408) 2024-12-13T07:36:24,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742366_1542 (size=3317408) 2024-12-13T07:36:24,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742366_1542 (size=3317408) 2024-12-13T07:36:24,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742367_1543 (size=503880) 2024-12-13T07:36:24,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742367_1543 (size=503880) 2024-12-13T07:36:24,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742367_1543 (size=503880) 2024-12-13T07:36:24,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742368_1544 (size=4695811) 2024-12-13T07:36:24,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742368_1544 (size=4695811) 2024-12-13T07:36:24,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742368_1544 (size=4695811) 2024-12-13T07:36:24,207 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-13T07:36:24,208 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-13T07:36:24,210 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T07:36:24,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742369_1545 (size=366) 2024-12-13T07:36:24,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742369_1545 (size=366) 2024-12-13T07:36:24,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742369_1545 (size=366) 2024-12-13T07:36:24,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742370_1546 (size=15) 2024-12-13T07:36:24,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742370_1546 (size=15) 2024-12-13T07:36:24,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742370_1546 (size=15) 2024-12-13T07:36:24,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742371_1547 (size=305057) 2024-12-13T07:36:24,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742371_1547 (size=305057) 2024-12-13T07:36:24,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is 
added to blk_1073742371_1547 (size=305057) 2024-12-13T07:36:26,167 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:36:26,167 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T07:36:26,169 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0009_000001 (auth:SIMPLE) from 127.0.0.1:33438 2024-12-13T07:36:26,178 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_0/usercache/jenkins/appcache/application_1734075189398_0009/container_1734075189398_0009_01_000001/launch_container.sh] 2024-12-13T07:36:26,178 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_0/usercache/jenkins/appcache/application_1734075189398_0009/container_1734075189398_0009_01_000001/container_tokens] 2024-12-13T07:36:26,178 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-1_0/usercache/jenkins/appcache/application_1734075189398_0009/container_1734075189398_0009_01_000001/sysfs] 2024-12-13T07:36:26,989 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0010_000001 (auth:SIMPLE) from 127.0.0.1:37952 2024-12-13T07:36:27,044 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:36:30,632 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-13T07:36:31,833 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0010_000001 (auth:SIMPLE) from 127.0.0.1:59944 2024-12-13T07:36:32,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:32,021 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-13T07:36:32,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742372_1548 (size=350755) 2024-12-13T07:36:32,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742372_1548 (size=350755) 2024-12-13T07:36:32,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742372_1548 (size=350755) 2024-12-13T07:36:34,035 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0010_000001 (auth:SIMPLE) from 127.0.0.1:50208 2024-12-13T07:36:36,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742373_1549 (size=8256) 2024-12-13T07:36:36,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742373_1549 (size=8256) 2024-12-13T07:36:36,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742373_1549 (size=8256) 2024-12-13T07:36:36,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742374_1550 (size=5354) 2024-12-13T07:36:36,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742374_1550 (size=5354) 2024-12-13T07:36:36,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742374_1550 (size=5354) 2024-12-13T07:36:36,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742375_1551 (size=17455) 2024-12-13T07:36:36,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742375_1551 (size=17455) 2024-12-13T07:36:36,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742375_1551 (size=17455) 2024-12-13T07:36:36,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742376_1552 (size=476) 2024-12-13T07:36:36,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742376_1552 (size=476) 2024-12-13T07:36:36,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742376_1552 (size=476) 2024-12-13T07:36:36,812 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742377_1553 (size=17455) 2024-12-13T07:36:36,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742377_1553 (size=17455) 2024-12-13T07:36:36,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742377_1553 (size=17455) 2024-12-13T07:36:36,814 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_2/usercache/jenkins/appcache/application_1734075189398_0010/container_1734075189398_0010_01_000002/launch_container.sh] 2024-12-13T07:36:36,814 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_2/usercache/jenkins/appcache/application_1734075189398_0010/container_1734075189398_0010_01_000002/container_tokens] 2024-12-13T07:36:36,814 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_2/usercache/jenkins/appcache/application_1734075189398_0010/container_1734075189398_0010_01_000002/sysfs] 2024-12-13T07:36:36,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742378_1554 (size=350755) 2024-12-13T07:36:36,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742378_1554 (size=350755) 2024-12-13T07:36:36,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742378_1554 (size=350755) 2024-12-13T07:36:36,848 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734075189398_0010_000001 (auth:SIMPLE) from 127.0.0.1:50212 2024-12-13T07:36:38,369 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T07:36:38,369 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
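Once the MapReduce copy finishes, ExportSnapshot finalizes the export and verifies the exported snapshot, and the entries that follow show the test listing both snapshot directories and finding .snapshotinfo and data.manifest in each. A rough sketch of that kind of listing check using the plain Hadoop FileSystem API is below; the paths are copied from the log, but the helper class is assumed for illustration and is not the test's own code.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListExportedSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41477"), conf);
    // Exported snapshot directory taken from the log; .snapshotinfo and data.manifest should appear here.
    Path exported = new Path("/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/"
        + "export-test/export-1734075383022/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp");
    for (FileStatus status : fs.listStatus(exported)) {
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
  }
}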
2024-12-13T07:36:38,375 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,375 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T07:36:38,376 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T07:36:38,376 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,376 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-13T07:36:38,376 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-13T07:36:38,376 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-313103365_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075383022/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075383022/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,376 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075383022/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-13T07:36:38,376 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/export-test/export-1734075383022/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-13T07:36:38,381 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,382 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=209, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,384 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075398384"}]},"ts":"1734075398384"} 2024-12-13T07:36:38,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-13T07:36:38,385 INFO [PEWorker-5 {}] 
hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-13T07:36:38,424 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-13T07:36:38,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-13T07:36:38,426 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=66b14d577f845e3d69fc6ad302fa5d68, UNASSIGN}, {pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=74db0d255f39256144c82caec65ee664, UNASSIGN}] 2024-12-13T07:36:38,426 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=74db0d255f39256144c82caec65ee664, UNASSIGN 2024-12-13T07:36:38,426 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=66b14d577f845e3d69fc6ad302fa5d68, UNASSIGN 2024-12-13T07:36:38,427 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=66b14d577f845e3d69fc6ad302fa5d68, regionState=CLOSING, regionLocation=9eb75de94ef3,41895,1734075182430 2024-12-13T07:36:38,427 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=74db0d255f39256144c82caec65ee664, regionState=CLOSING, regionLocation=9eb75de94ef3,42189,1734075182526 2024-12-13T07:36:38,428 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:36:38,428 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=213, ppid=211, state=RUNNABLE; CloseRegionProcedure 66b14d577f845e3d69fc6ad302fa5d68, server=9eb75de94ef3,41895,1734075182430}] 2024-12-13T07:36:38,428 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T07:36:38,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=214, ppid=212, state=RUNNABLE; CloseRegionProcedure 74db0d255f39256144c82caec65ee664, server=9eb75de94ef3,42189,1734075182526}] 2024-12-13T07:36:38,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-13T07:36:38,579 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,41895,1734075182430 2024-12-13T07:36:38,580 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(124): Close 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:38,580 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(138): Unassign region: split region: false: 
evictCache: false 2024-12-13T07:36:38,580 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1681): Closing 66b14d577f845e3d69fc6ad302fa5d68, disabling compactions & flushes 2024-12-13T07:36:38,580 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:38,580 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:38,580 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. after waiting 0 ms 2024-12-13T07:36:38,580 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:38,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9eb75de94ef3,42189,1734075182526 2024-12-13T07:36:38,581 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(124): Close 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:38,581 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T07:36:38,581 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1681): Closing 74db0d255f39256144c82caec65ee664, disabling compactions & flushes 2024-12-13T07:36:38,581 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 2024-12-13T07:36:38,581 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 2024-12-13T07:36:38,581 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. after waiting 0 ms 2024-12-13T07:36:38,581 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 
2024-12-13T07:36:38,583 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:36:38,583 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:36:38,584 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:36:38,584 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68. 2024-12-13T07:36:38,584 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1635): Region close journal for 66b14d577f845e3d69fc6ad302fa5d68: 2024-12-13T07:36:38,584 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:36:38,584 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664. 
2024-12-13T07:36:38,584 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1635): Region close journal for 74db0d255f39256144c82caec65ee664: 2024-12-13T07:36:38,585 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(170): Closed 66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:38,585 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=66b14d577f845e3d69fc6ad302fa5d68, regionState=CLOSED 2024-12-13T07:36:38,585 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(170): Closed 74db0d255f39256144c82caec65ee664 2024-12-13T07:36:38,586 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=74db0d255f39256144c82caec65ee664, regionState=CLOSED 2024-12-13T07:36:38,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=213, resume processing ppid=211 2024-12-13T07:36:38,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=213, ppid=211, state=SUCCESS; CloseRegionProcedure 66b14d577f845e3d69fc6ad302fa5d68, server=9eb75de94ef3,41895,1734075182430 in 158 msec 2024-12-13T07:36:38,587 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=214, resume processing ppid=212 2024-12-13T07:36:38,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=214, ppid=212, state=SUCCESS; CloseRegionProcedure 74db0d255f39256144c82caec65ee664, server=9eb75de94ef3,42189,1734075182526 in 158 msec 2024-12-13T07:36:38,588 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=211, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=66b14d577f845e3d69fc6ad302fa5d68, UNASSIGN in 161 msec 2024-12-13T07:36:38,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=212, resume processing ppid=210 2024-12-13T07:36:38,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=212, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=74db0d255f39256144c82caec65ee664, UNASSIGN in 162 msec 2024-12-13T07:36:38,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=210, resume processing ppid=209 2024-12-13T07:36:38,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=210, ppid=209, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 165 msec 2024-12-13T07:36:38,591 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734075398591"}]},"ts":"1734075398591"} 2024-12-13T07:36:38,594 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-13T07:36:38,598 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-13T07:36:38,600 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=209, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 217 msec 2024-12-13T07:36:38,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-13T07:36:38,686 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 209 completed 2024-12-13T07:36:38,687 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] procedure2.ProcedureExecutor(1098): Stored pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,688 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,688 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=215, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,692 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41895 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,694 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:38,694 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664 2024-12-13T07:36:38,695 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/recovered.edits] 2024-12-13T07:36:38,695 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/cf, FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/recovered.edits] 2024-12-13T07:36:38,698 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/cf/d0a5adb89c744600be1051a63ad81854 to 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/cf/d0a5adb89c744600be1051a63ad81854 2024-12-13T07:36:38,698 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/cf/cc2521918e904120bd34c6ed464560c2 to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/cf/cc2521918e904120bd34c6ed464560c2 2024-12-13T07:36:38,701 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68/recovered.edits/9.seqid 2024-12-13T07:36:38,701 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/recovered.edits/9.seqid to hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664/recovered.edits/9.seqid 2024-12-13T07:36:38,701 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/66b14d577f845e3d69fc6ad302fa5d68 2024-12-13T07:36:38,701 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testtb-testExportFileSystemStateWithSkipTmp/74db0d255f39256144c82caec65ee664 2024-12-13T07:36:38,701 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-13T07:36:38,703 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=215, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,705 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-13T07:36:38,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,707 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,707 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-13T07:36:38,708 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-13T07:36:38,708 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-13T07:36:38,708 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-13T07:36:38,708 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-13T07:36:38,708 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=215, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,708 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-13T07:36:38,708 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075398708"}]},"ts":"9223372036854775807"} 2024-12-13T07:36:38,708 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734075398708"}]},"ts":"9223372036854775807"} 2024-12-13T07:36:38,715 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T07:36:38,715 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 66b14d577f845e3d69fc6ad302fa5d68, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734075381696.66b14d577f845e3d69fc6ad302fa5d68.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 74db0d255f39256144c82caec65ee664, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734075381696.74db0d255f39256144c82caec65ee664.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T07:36:38,715 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
2024-12-13T07:36:38,715 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734075398715"}]},"ts":"9223372036854775807"} 2024-12-13T07:36:38,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:38,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:38,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:38,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T07:36:38,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-13T07:36:38,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:38,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:38,717 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-13T07:36:38,717 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:38,717 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T07:36:38,724 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=215, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=215, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 37 msec 2024-12-13T07:36:38,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-13T07:36:38,817 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 215 completed 2024-12-13T07:36:38,823 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-13T07:36:38,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,825 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-13T07:36:38,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:38,843 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=813 (was 811) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7742 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:48820 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1481675330_1 at /127.0.0.1:48790 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1481675330_1 at /127.0.0.1:37806 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:37830 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39067 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1658146201) connection to localhost/127.0.0.1:39067 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-313103365_22 at /127.0.0.1:44990 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-52 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-50 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-49 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 79229) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) 
java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6218feab-shared-pool-51 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=813 (was 820), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=407 (was 386) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=5462 (was 5918) 2024-12-13T07:36:38,843 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-12-13T07:36:38,843 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2861): Stopping mini mapreduce cluster... 2024-12-13T07:36:38,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@826335e{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-13T07:36:38,854 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54584004{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T07:36:38,854 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T07:36:38,854 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f71bc03{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-13T07:36:38,854 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c9562ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,STOPPED} 2024-12-13T07:36:40,580 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region aa4b6ee25986a85d04a0682e5b7b9bda, had cached 0 bytes from a total of 8256 2024-12-13T07:36:40,580 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 9e44ea00e032406763f2d24d62083e47, had cached 0 bytes from a total of 5356 2024-12-13T07:36:42,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T07:36:42,914 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth 
successful for appattempt_1734075189398_0010_000001 (auth:SIMPLE) from 127.0.0.1:37074 2024-12-13T07:36:42,923 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0010/container_1734075189398_0010_01_000001/launch_container.sh] 2024-12-13T07:36:42,923 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0010/container_1734075189398_0010_01_000001/container_tokens] 2024-12-13T07:36:42,923 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/MiniMRCluster_1522358550/yarn-508467533/MiniMRCluster_1522358550-localDir-nm-0_1/usercache/jenkins/appcache/application_1734075189398_0010/container_1734075189398_0010_01_000001/sysfs] 2024-12-13T07:36:44,109 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:36:55,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5db52259{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-13T07:36:55,874 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58d59e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T07:36:55,874 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T07:36:55,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10274189{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-13T07:36:55,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@600ff4a6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,STOPPED} 2024-12-13T07:37:00,632 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-13T07:37:12,890 ERROR [Thread[Thread-417,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-13T07:37:12,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e6298f3{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-13T07:37:12,891 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6641ae5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T07:37:12,891 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T07:37:12,892 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a82b03d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-13T07:37:12,892 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@89a48b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,STOPPED} 2024-12-13T07:37:12,895 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-13T07:37:12,901 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-13T07:37:12,901 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-13T07:37:12,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741830_1006 (size=947206) 2024-12-13T07:37:12,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741830_1006 (size=947206) 2024-12-13T07:37:12,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741830_1006 (size=947206) 2024-12-13T07:37:12,908 ERROR [Thread[Thread-440,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-13T07:37:12,910 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47a75dcc{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-13T07:37:12,911 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@aa33399{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T07:37:12,911 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T07:37:12,911 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@3d703f12{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-13T07:37:12,911 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@793676f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,STOPPED} 2024-12-13T07:37:12,913 ERROR [Thread[Thread-399,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-13T07:37:12,913 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2864): Mini mapreduce cluster stopped 2024-12-13T07:37:12,913 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-13T07:37:12,913 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-13T07:37:12,913 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b906bf1 to 127.0.0.1:59810 2024-12-13T07:37:12,913 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:37:12,913 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-13T07:37:12,913 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1463198868, stopped=false 2024-12-13T07:37:12,914 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:12,914 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-13T07:37:12,914 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=9eb75de94ef3,33713,1734075181583 2024-12-13T07:37:12,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T07:37:12,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T07:37:12,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T07:37:12,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T07:37:12,965 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-13T07:37:12,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:37:12,965 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:37:12,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:37:12,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T07:37:12,966 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:37:12,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T07:37:12,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T07:37:12,966 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '9eb75de94ef3,36021,1734075182342' ***** 2024-12-13T07:37:12,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T07:37:12,967 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:12,967 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-13T07:37:12,967 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '9eb75de94ef3,41895,1734075182430' ***** 2024-12-13T07:37:12,967 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:12,967 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-13T07:37:12,967 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '9eb75de94ef3,42189,1734075182526' ***** 2024-12-13T07:37:12,967 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-13T07:37:12,967 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:12,967 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-13T07:37:12,967 INFO [RS:0;9eb75de94ef3:36021 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-13T07:37:12,967 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-13T07:37:12,967 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-13T07:37:12,968 INFO [RS:0;9eb75de94ef3:36021 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-13T07:37:12,968 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-13T07:37:12,968 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(1224): stopping server 9eb75de94ef3,36021,1734075182342 2024-12-13T07:37:12,968 INFO [RS:1;9eb75de94ef3:41895 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-13T07:37:12,968 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-13T07:37:12,968 DEBUG [RS:0;9eb75de94ef3:36021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:37:12,968 INFO [RS:1;9eb75de94ef3:41895 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-13T07:37:12,968 INFO [RS:2;9eb75de94ef3:42189 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-13T07:37:12,968 INFO [RS:2;9eb75de94ef3:42189 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-13T07:37:12,968 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(1250): stopping server 9eb75de94ef3,36021,1734075182342; all regions closed. 2024-12-13T07:37:12,968 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(3579): Received CLOSE for 4ee0c6377199ce786a0b82a4e00c172f 2024-12-13T07:37:12,968 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(3579): Received CLOSE for 9e44ea00e032406763f2d24d62083e47 2024-12-13T07:37:12,968 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T07:37:12,969 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-13T07:37:12,969 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(3579): Received CLOSE for aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:37:12,969 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(3579): Received CLOSE for 4825f6265c91ad571de6370b4fecbf02 2024-12-13T07:37:12,969 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1224): stopping server 9eb75de94ef3,41895,1734075182430 2024-12-13T07:37:12,969 DEBUG [RS:1;9eb75de94ef3:41895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:37:12,969 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-13T07:37:12,969 DEBUG [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1603): Online Regions={4ee0c6377199ce786a0b82a4e00c172f=hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f., aa4b6ee25986a85d04a0682e5b7b9bda=testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda., 4825f6265c91ad571de6370b4fecbf02=hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02.} 2024-12-13T07:37:12,970 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4ee0c6377199ce786a0b82a4e00c172f, disabling compactions & flushes 2024-12-13T07:37:12,970 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 
2024-12-13T07:37:12,970 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 2024-12-13T07:37:12,970 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. after waiting 0 ms 2024-12-13T07:37:12,970 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 2024-12-13T07:37:12,970 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 4ee0c6377199ce786a0b82a4e00c172f 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-13T07:37:12,971 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1224): stopping server 9eb75de94ef3,42189,1734075182526 2024-12-13T07:37:12,971 DEBUG [RS:2;9eb75de94ef3:42189 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:37:12,971 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 9e44ea00e032406763f2d24d62083e47, disabling compactions & flushes 2024-12-13T07:37:12,971 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 2024-12-13T07:37:12,971 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 2024-12-13T07:37:12,971 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-13T07:37:12,971 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. after waiting 0 ms 2024-12-13T07:37:12,971 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-13T07:37:12,971 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 2024-12-13T07:37:12,971 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-13T07:37:12,971 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-13T07:37:12,973 DEBUG [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1629): Waiting on 4825f6265c91ad571de6370b4fecbf02, 4ee0c6377199ce786a0b82a4e00c172f, aa4b6ee25986a85d04a0682e5b7b9bda 2024-12-13T07:37:12,973 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-13T07:37:12,973 DEBUG [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1603): Online Regions={9e44ea00e032406763f2d24d62083e47=testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47., 1588230740=hbase:meta,,1.1588230740} 2024-12-13T07:37:12,973 DEBUG [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 9e44ea00e032406763f2d24d62083e47 2024-12-13T07:37:12,973 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T07:37:12,973 INFO [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T07:37:12,973 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T07:37:12,973 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T07:37:12,973 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T07:37:12,973 INFO [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=68.66 KB heapSize=109 KB 2024-12-13T07:37:12,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741833_1009 (size=11782) 2024-12-13T07:37:12,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741833_1009 (size=11782) 2024-12-13T07:37:12,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741833_1009 (size=11782) 2024-12-13T07:37:12,980 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/9e44ea00e032406763f2d24d62083e47/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T07:37:12,980 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:12,981 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 
2024-12-13T07:37:12,981 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 9e44ea00e032406763f2d24d62083e47: 2024-12-13T07:37:12,981 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1734075310170.9e44ea00e032406763f2d24d62083e47. 2024-12-13T07:37:12,982 DEBUG [RS:0;9eb75de94ef3:36021 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/oldWALs 2024-12-13T07:37:12,982 INFO [RS:0;9eb75de94ef3:36021 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 9eb75de94ef3%2C36021%2C1734075182342:(num 1734075184494) 2024-12-13T07:37:12,982 DEBUG [RS:0;9eb75de94ef3:36021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:37:12,982 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T07:37:12,982 INFO [RS:0;9eb75de94ef3:36021 {}] hbase.ChoreService(370): Chore service for: regionserver/9eb75de94ef3:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-13T07:37:12,982 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-13T07:37:12,982 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-13T07:37:12,982 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-13T07:37:12,982 INFO [regionserver/9eb75de94ef3:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-13T07:37:12,983 INFO [RS:0;9eb75de94ef3:36021 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36021 2024-12-13T07:37:12,988 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/namespace/4ee0c6377199ce786a0b82a4e00c172f/.tmp/info/68fccd813b6947a3a2b0606a3321e759 is 45, key is default/info:d/1734075185952/Put/seqid=0 2024-12-13T07:37:12,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T07:37:12,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9eb75de94ef3,36021,1734075182342 2024-12-13T07:37:12,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742379_1555 (size=5037) 2024-12-13T07:37:12,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742379_1555 (size=5037) 2024-12-13T07:37:12,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742379_1555 (size=5037) 2024-12-13T07:37:12,996 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/namespace/4ee0c6377199ce786a0b82a4e00c172f/.tmp/info/68fccd813b6947a3a2b0606a3321e759 2024-12-13T07:37:12,999 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9eb75de94ef3,36021,1734075182342] 2024-12-13T07:37:13,000 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 9eb75de94ef3,36021,1734075182342; numProcessing=1 2024-12-13T07:37:13,001 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/namespace/4ee0c6377199ce786a0b82a4e00c172f/.tmp/info/68fccd813b6947a3a2b0606a3321e759 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/namespace/4ee0c6377199ce786a0b82a4e00c172f/info/68fccd813b6947a3a2b0606a3321e759 2024-12-13T07:37:13,003 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/.tmp/info/566cd06420dd40749fb6faad7fa549c1 is 173, key is testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda./info:regioninfo/1734075310591/Put/seqid=0 2024-12-13T07:37:13,006 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/namespace/4ee0c6377199ce786a0b82a4e00c172f/info/68fccd813b6947a3a2b0606a3321e759, entries=2, sequenceid=6, filesize=4.9 K 2024-12-13T07:37:13,006 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/9eb75de94ef3,36021,1734075182342 already deleted, retry=false 2024-12-13T07:37:13,006 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 9eb75de94ef3,36021,1734075182342 expired; onlineServers=2 2024-12-13T07:37:13,006 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 4ee0c6377199ce786a0b82a4e00c172f in 36ms, sequenceid=6, compaction requested=false 2024-12-13T07:37:13,008 INFO [regionserver/9eb75de94ef3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T07:37:13,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742380_1556 (size=15630) 2024-12-13T07:37:13,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742380_1556 (size=15630) 2024-12-13T07:37:13,008 INFO [regionserver/9eb75de94ef3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T07:37:13,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742380_1556 (size=15630) 2024-12-13T07:37:13,010 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/namespace/4ee0c6377199ce786a0b82a4e00c172f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T07:37:13,010 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:13,010 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 2024-12-13T07:37:13,010 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4ee0c6377199ce786a0b82a4e00c172f: 2024-12-13T07:37:13,010 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734075185364.4ee0c6377199ce786a0b82a4e00c172f. 2024-12-13T07:37:13,010 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing aa4b6ee25986a85d04a0682e5b7b9bda, disabling compactions & flushes 2024-12-13T07:37:13,010 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 2024-12-13T07:37:13,010 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 
2024-12-13T07:37:13,010 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. after waiting 0 ms 2024-12-13T07:37:13,010 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 2024-12-13T07:37:13,011 INFO [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.26 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/.tmp/info/566cd06420dd40749fb6faad7fa549c1 2024-12-13T07:37:13,013 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/default/testExportExpiredSnapshot/aa4b6ee25986a85d04a0682e5b7b9bda/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T07:37:13,013 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:13,013 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 2024-12-13T07:37:13,013 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for aa4b6ee25986a85d04a0682e5b7b9bda: 2024-12-13T07:37:13,014 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1734075310170.aa4b6ee25986a85d04a0682e5b7b9bda. 2024-12-13T07:37:13,014 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4825f6265c91ad571de6370b4fecbf02, disabling compactions & flushes 2024-12-13T07:37:13,014 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 2024-12-13T07:37:13,014 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 2024-12-13T07:37:13,014 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. after waiting 0 ms 2024-12-13T07:37:13,014 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 
2024-12-13T07:37:13,014 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 4825f6265c91ad571de6370b4fecbf02 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-12-13T07:37:13,027 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/.tmp/rep_barrier/47859afcb7ab47bfa4f42ccc09c5b13d is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92./rep_barrier:/1734075308192/DeleteFamily/seqid=0 2024-12-13T07:37:13,027 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/acl/4825f6265c91ad571de6370b4fecbf02/.tmp/l/55b30da036bb4aa6b5af0cbfcf994860 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1734075308172/DeleteFamily/seqid=0 2024-12-13T07:37:13,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742382_1558 (size=5695) 2024-12-13T07:37:13,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742381_1557 (size=8007) 2024-12-13T07:37:13,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742382_1558 (size=5695) 2024-12-13T07:37:13,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742382_1558 (size=5695) 2024-12-13T07:37:13,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742381_1557 (size=8007) 2024-12-13T07:37:13,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742381_1557 (size=8007) 2024-12-13T07:37:13,032 INFO [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/.tmp/rep_barrier/47859afcb7ab47bfa4f42ccc09c5b13d 2024-12-13T07:37:13,032 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/acl/4825f6265c91ad571de6370b4fecbf02/.tmp/l/55b30da036bb4aa6b5af0cbfcf994860 2024-12-13T07:37:13,034 INFO [regionserver/9eb75de94ef3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T07:37:13,035 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 55b30da036bb4aa6b5af0cbfcf994860 2024-12-13T07:37:13,036 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/acl/4825f6265c91ad571de6370b4fecbf02/.tmp/l/55b30da036bb4aa6b5af0cbfcf994860 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/acl/4825f6265c91ad571de6370b4fecbf02/l/55b30da036bb4aa6b5af0cbfcf994860 2024-12-13T07:37:13,039 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 55b30da036bb4aa6b5af0cbfcf994860 2024-12-13T07:37:13,039 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/acl/4825f6265c91ad571de6370b4fecbf02/l/55b30da036bb4aa6b5af0cbfcf994860, entries=12, sequenceid=27, filesize=5.6 K 2024-12-13T07:37:13,040 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 4825f6265c91ad571de6370b4fecbf02 in 26ms, sequenceid=27, compaction requested=false 2024-12-13T07:37:13,043 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/acl/4825f6265c91ad571de6370b4fecbf02/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-12-13T07:37:13,043 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:13,043 INFO [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 2024-12-13T07:37:13,043 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4825f6265c91ad571de6370b4fecbf02: 2024-12-13T07:37:13,043 DEBUG [RS_CLOSE_REGION-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1734075186098.4825f6265c91ad571de6370b4fecbf02. 
2024-12-13T07:37:13,055 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/.tmp/table/e2d69948b17648c1baf293b4e09da8d5 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734075291771.339c036edb1d57cbb52ae3ff753d0f92./table:/1734075308192/DeleteFamily/seqid=0 2024-12-13T07:37:13,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073742383_1559 (size=8861) 2024-12-13T07:37:13,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073742383_1559 (size=8861) 2024-12-13T07:37:13,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073742383_1559 (size=8861) 2024-12-13T07:37:13,060 INFO [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.06 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/.tmp/table/e2d69948b17648c1baf293b4e09da8d5 2024-12-13T07:37:13,064 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/.tmp/info/566cd06420dd40749fb6faad7fa549c1 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/info/566cd06420dd40749fb6faad7fa549c1 2024-12-13T07:37:13,068 INFO [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/info/566cd06420dd40749fb6faad7fa549c1, entries=84, sequenceid=202, filesize=15.3 K 2024-12-13T07:37:13,069 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/.tmp/rep_barrier/47859afcb7ab47bfa4f42ccc09c5b13d as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/rep_barrier/47859afcb7ab47bfa4f42ccc09c5b13d 2024-12-13T07:37:13,073 INFO [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/rep_barrier/47859afcb7ab47bfa4f42ccc09c5b13d, entries=21, sequenceid=202, filesize=7.8 K 2024-12-13T07:37:13,074 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/.tmp/table/e2d69948b17648c1baf293b4e09da8d5 as hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/table/e2d69948b17648c1baf293b4e09da8d5 2024-12-13T07:37:13,077 INFO [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/table/e2d69948b17648c1baf293b4e09da8d5, entries=38, sequenceid=202, filesize=8.7 K 2024-12-13T07:37:13,078 INFO [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~68.66 KB/70312, heapSize ~108.95 KB/111568, currentSize=0 B/0 for 1588230740 in 105ms, sequenceid=202, compaction requested=false 2024-12-13T07:37:13,081 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/data/hbase/meta/1588230740/recovered.edits/205.seqid, newMaxSeqId=205, maxSeqId=1 2024-12-13T07:37:13,082 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:13,082 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-13T07:37:13,082 INFO [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-13T07:37:13,082 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T07:37:13,082 DEBUG [RS_CLOSE_META-regionserver/9eb75de94ef3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-13T07:37:13,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T07:37:13,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1001e4e83250001, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T07:37:13,098 INFO [RS:0;9eb75de94ef3:36021 {}] regionserver.HRegionServer(1307): Exiting; stopping=9eb75de94ef3,36021,1734075182342; zookeeper connection closed. 2024-12-13T07:37:13,098 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@8d8c714 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@8d8c714 2024-12-13T07:37:13,173 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1250): stopping server 9eb75de94ef3,41895,1734075182430; all regions closed. 2024-12-13T07:37:13,173 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1250): stopping server 9eb75de94ef3,42189,1734075182526; all regions closed. 
2024-12-13T07:37:13,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741835_1011 (size=16171) 2024-12-13T07:37:13,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741835_1011 (size=16171) 2024-12-13T07:37:13,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741835_1011 (size=16171) 2024-12-13T07:37:13,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741836_1012 (size=80694) 2024-12-13T07:37:13,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741836_1012 (size=80694) 2024-12-13T07:37:13,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741836_1012 (size=80694) 2024-12-13T07:37:13,181 DEBUG [RS:1;9eb75de94ef3:41895 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/oldWALs 2024-12-13T07:37:13,181 INFO [RS:1;9eb75de94ef3:41895 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 9eb75de94ef3%2C41895%2C1734075182430:(num 1734075184510) 2024-12-13T07:37:13,181 DEBUG [RS:1;9eb75de94ef3:41895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:37:13,182 DEBUG [RS:2;9eb75de94ef3:42189 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/oldWALs 2024-12-13T07:37:13,182 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T07:37:13,182 INFO [RS:2;9eb75de94ef3:42189 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 9eb75de94ef3%2C42189%2C1734075182526.meta:.meta(num 1734075185055) 2024-12-13T07:37:13,182 INFO [RS:1;9eb75de94ef3:41895 {}] hbase.ChoreService(370): Chore service for: regionserver/9eb75de94ef3:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-13T07:37:13,182 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-13T07:37:13,182 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-13T07:37:13,182 INFO [regionserver/9eb75de94ef3:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-13T07:37:13,182 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-13T07:37:13,183 INFO [RS:1;9eb75de94ef3:41895 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41895 2024-12-13T07:37:13,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33299 is added to blk_1073741834_1010 (size=10321) 2024-12-13T07:37:13,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40823 is added to blk_1073741834_1010 (size=10321) 2024-12-13T07:37:13,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34409 is added to blk_1073741834_1010 (size=10321) 2024-12-13T07:37:13,185 DEBUG [RS:2;9eb75de94ef3:42189 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/oldWALs 2024-12-13T07:37:13,185 INFO [RS:2;9eb75de94ef3:42189 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 9eb75de94ef3%2C42189%2C1734075182526:(num 1734075184503) 2024-12-13T07:37:13,185 DEBUG [RS:2;9eb75de94ef3:42189 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:37:13,186 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T07:37:13,186 INFO [RS:2;9eb75de94ef3:42189 {}] hbase.ChoreService(370): Chore service for: regionserver/9eb75de94ef3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-13T07:37:13,186 INFO [regionserver/9eb75de94ef3:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-13T07:37:13,186 INFO [RS:2;9eb75de94ef3:42189 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42189 2024-12-13T07:37:13,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T07:37:13,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9eb75de94ef3,41895,1734075182430 2024-12-13T07:37:13,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9eb75de94ef3,42189,1734075182526 2024-12-13T07:37:13,206 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9eb75de94ef3,42189,1734075182526] 2024-12-13T07:37:13,206 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 9eb75de94ef3,42189,1734075182526; numProcessing=2 2024-12-13T07:37:13,223 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/9eb75de94ef3,42189,1734075182526 already deleted, retry=false 2024-12-13T07:37:13,223 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 9eb75de94ef3,42189,1734075182526 expired; onlineServers=1 2024-12-13T07:37:13,223 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9eb75de94ef3,41895,1734075182430] 2024-12-13T07:37:13,223 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 9eb75de94ef3,41895,1734075182430; numProcessing=3 
2024-12-13T07:37:13,231 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/9eb75de94ef3,41895,1734075182430 already deleted, retry=false 2024-12-13T07:37:13,231 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 9eb75de94ef3,41895,1734075182430 expired; onlineServers=0 2024-12-13T07:37:13,231 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '9eb75de94ef3,33713,1734075181583' ***** 2024-12-13T07:37:13,231 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-13T07:37:13,232 DEBUG [M:0;9eb75de94ef3:33713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25aca118, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9eb75de94ef3/172.17.0.2:0 2024-12-13T07:37:13,232 INFO [M:0;9eb75de94ef3:33713 {}] regionserver.HRegionServer(1224): stopping server 9eb75de94ef3,33713,1734075181583 2024-12-13T07:37:13,232 INFO [M:0;9eb75de94ef3:33713 {}] regionserver.HRegionServer(1250): stopping server 9eb75de94ef3,33713,1734075181583; all regions closed. 2024-12-13T07:37:13,232 DEBUG [M:0;9eb75de94ef3:33713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T07:37:13,232 DEBUG [M:0;9eb75de94ef3:33713 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-13T07:37:13,232 DEBUG [M:0;9eb75de94ef3:33713 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-13T07:37:13,233 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-13T07:37:13,233 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster-HFileCleaner.small.0-1734075184068 {}] cleaner.HFileCleaner(306): Exit Thread[master/9eb75de94ef3:0:becomeActiveMaster-HFileCleaner.small.0-1734075184068,5,FailOnTimeoutGroup] 2024-12-13T07:37:13,233 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster-HFileCleaner.large.0-1734075184059 {}] cleaner.HFileCleaner(306): Exit Thread[master/9eb75de94ef3:0:becomeActiveMaster-HFileCleaner.large.0-1734075184059,5,FailOnTimeoutGroup] 2024-12-13T07:37:13,233 INFO [M:0;9eb75de94ef3:33713 {}] hbase.ChoreService(370): Chore service for: master/9eb75de94ef3:0 had [] on shutdown 2024-12-13T07:37:13,233 DEBUG [M:0;9eb75de94ef3:33713 {}] master.HMaster(1733): Stopping service threads 2024-12-13T07:37:13,233 INFO [M:0;9eb75de94ef3:33713 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-13T07:37:13,234 INFO [M:0;9eb75de94ef3:33713 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-13T07:37:13,235 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-13T07:37:13,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-13T07:37:13,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T07:37:13,240 DEBUG [M:0;9eb75de94ef3:33713 {}] zookeeper.ZKUtil(347): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-13T07:37:13,240 WARN [M:0;9eb75de94ef3:33713 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-13T07:37:13,240 INFO [M:0;9eb75de94ef3:33713 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-13T07:37:13,240 INFO [M:0;9eb75de94ef3:33713 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-13T07:37:13,241 DEBUG [M:0;9eb75de94ef3:33713 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-13T07:37:13,241 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-13T07:37:13,256 INFO [M:0;9eb75de94ef3:33713 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T07:37:13,256 DEBUG [M:0;9eb75de94ef3:33713 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T07:37:13,256 DEBUG [M:0;9eb75de94ef3:33713 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-13T07:37:13,256 DEBUG [M:0;9eb75de94ef3:33713 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T07:37:13,256 INFO [M:0;9eb75de94ef3:33713 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=805.74 KB heapSize=967.27 KB
2024-12-13T07:37:13,256 ERROR [AsyncFSWAL-0-hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData-prefix:9eb75de94ef3,33713,1734075181583 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData-prefix:9eb75de94ef3,33713,1734075181583,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:419) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:132) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:830) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:128) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1148) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.appendAndSync(AsyncFSWAL.java:500) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.consume(AsyncFSWAL.java:603) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T07:37:13,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T07:37:13,306 INFO [RS:1;9eb75de94ef3:41895 {}] regionserver.HRegionServer(1307): Exiting; stopping=9eb75de94ef3,41895,1734075182430; zookeeper connection closed.
2024-12-13T07:37:13,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41895-0x1001e4e83250002, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T07:37:13,307 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@14997c7e {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@14997c7e
2024-12-13T07:37:13,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T07:37:13,315 INFO [RS:2;9eb75de94ef3:42189 {}] regionserver.HRegionServer(1307): Exiting; stopping=9eb75de94ef3,42189,1734075182526; zookeeper connection closed.
2024-12-13T07:37:13,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42189-0x1001e4e83250003, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T07:37:13,315 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@55e76003 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@55e76003 2024-12-13T07:37:13,316 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-13T07:37:18,499 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:37:22,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:22,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-13T07:37:22,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-13T07:37:22,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-13T07:37:22,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:22,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-13T07:37:22,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T07:37:22,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-13T07:37:22,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-13T07:37:27,525 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T07:37:30,632 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T07:38:00,633 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-13T07:38:02,631 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-13T07:38:02,631 DEBUG [master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-13T07:38:10,616 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9eb75de94ef3:33713 224 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@182ecfbe Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32a5bb4b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3190 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 32 Waiting on java.util.concurrent.CountDownLatch$Sync@2cc76b33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13695 Waited count: 14179 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 
(org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 8 Waited count: 9 Waiting on java.lang.ref.ReferenceQueue$Lock@2c6d5259 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@330dcb7c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@258b3810): State: TIMED_WAITING Blocked count: 1 Waited count: 632 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2027007014-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2027007014-38): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2027007014-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2027007014-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 41 (qtp2027007014-41-acceptor-0@4635fff0-ServerConnector@1bde3171{HTTP/1.1, (http/1.1)}{localhost:44519}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 42 (qtp2027007014-42):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 43 (qtp2027007014-43):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 44 (qtp2027007014-44):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 45 (Session-HouseKeeper-52bd7ad0-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 46 (pool-7-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 53 (FSEditLogAsync):
  State: WAITING
  Blocked count: 35
  Waited count: 2919
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d37a293
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 55 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 56 (IPC Server idle connection scanner for port 41477):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 33
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 58 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 63
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@bf12a04):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 62 (DatanodeAdminMonitor-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 106
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@28e374cd):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 63
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 47 (RedundancyMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 106
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344)
    java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 48 (MarkedDeleteBlockScrubberThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 31125
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 51 (Block report processor):
  State: WAITING
  Blocked count: 0
  Waited count: 1246
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@176f9e6e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614)
Thread 57 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 54 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 64 (IPC Server handler 0 on default port 41477):
  State: TIMED_WAITING
  Blocked count: 110
  Waited count: 2141
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 65 (IPC Server handler 1 on default port 41477):
  State: TIMED_WAITING
  Blocked count: 117
  Waited count: 2115
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 66 (IPC Server handler 2 on default port 41477):
  State: TIMED_WAITING
  Blocked count: 140
  Waited count: 2133
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 67 (IPC Server handler 3 on default port 41477):
  State: TIMED_WAITING
  Blocked count: 106
  Waited count: 2112
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 68 (IPC Server handler 4 on default port 41477):
  State: TIMED_WAITING
  Blocked count: 136
  Waited count: 2137
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 69 (pool-12-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@ab0fb39):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 158
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5976018a):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 63
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@3aa58b17):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@13ac15a7):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 75 (CacheReplicationMonitor(1867710578)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 12
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186)
Thread 86 (pool-18-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 87 (qtp1363635181-87):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 88 (qtp1363635181-88-acceptor-0@4c0c4250-ServerConnector@440e205c{HTTP/1.1, (http/1.1)}{localhost:36637}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 89 (qtp1363635181-89):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 90 (qtp1363635181-90):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 91 (Session-HouseKeeper-4dd6ff29-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 92 (nioEventLoopGroup-2-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@68ecf444):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 629
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 95 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 96 (IPC Server idle connection scanner for port 38399):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 33
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 98 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 63
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 101 (Command processor):
  State: WAITING
  Blocked count: 1
  Waited count: 281
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2113aed7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 102 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477):
  State: TIMED_WAITING
  Blocked count: 1299
  Waited count: 1361
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 103 (pool-20-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@402b75c8):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 97 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 94 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 104 (IPC Server handler 0 on default port 38399):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 320
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 105 (IPC Server handler 1 on default port 38399):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 334
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 106 (IPC Server handler 2 on default port 38399):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 327
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 107 (IPC Server handler 3 on default port 38399):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 325
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 108 (IPC Server handler 4 on default port 38399):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 315
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 113 (IPC Client (1658146201) connection to localhost/127.0.0.1:41477 from jenkins):
  State: TIMED_WAITING
  Blocked count: 1051
  Waited count: 1052
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:41477):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1810
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 120 (pool-26-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 121 (qtp751009864-121):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 122 (qtp751009864-122-acceptor-0@170e356c-ServerConnector@42d696f2{HTTP/1.1, (http/1.1)}{localhost:33899}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 123 (qtp751009864-123):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 124 (qtp751009864-124):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 125 (Session-HouseKeeper-4c16e68d-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 126 (nioEventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8727970):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 629
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 129 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 130 (IPC Server idle connection scanner for port 38381):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 33
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 132 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 63
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 135 (Command processor):
  State: WAITING
  Blocked count: 2
  Waited count: 293
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60cbaf0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 136 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477):
  State: TIMED_WAITING
  Blocked count: 1260
  Waited count: 1345
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 137 (pool-29-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@270b462c):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 131 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 128 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 138 (IPC Server handler 0 on default port 38381):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 335
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 139 (IPC Server handler 1 on default port 38381):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 361
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 140 (IPC Server handler 2 on default port 38381):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 345
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 141 (IPC Server handler 3 on default port 38381):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 339
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 142 (IPC Server handler 4 on default port 38381):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 352
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 152 (pool-36-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 153 (qtp1966195873-153):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 154 (qtp1966195873-154-acceptor-0@3e80f22e-ServerConnector@7a125539{HTTP/1.1, (http/1.1)}{localhost:36975}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 155 (qtp1966195873-155):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 156 (qtp1966195873-156):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 157 (Session-HouseKeeper-4ca4f3ae-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 158 (nioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@35dfc49):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 628
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 161 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 162 (IPC Server idle connection scanner for port 41463):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 33
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 164 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 63
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 167 (Command processor):
  State: WAITING
  Blocked count: 2
  Waited count: 284
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b9b828b
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 168 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477):
  State: TIMED_WAITING
  Blocked count: 1227
  Waited count: 1343
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 169 (pool-38-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@13cf1202):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 163 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 160 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 170 (IPC Server handler 0 on default port 41463):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 350
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 171 (IPC Server handler 1 on default port 41463):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 317
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 172 (IPC Server handler 2 on default port 41463):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 362
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 173 (IPC Server handler 3 on default port 41463):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 341
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 174 (IPC Server handler 4 on default port 41463):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 318
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3)):
  State: TIMED_WAITING
  Blocked count: 34
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1)):
  State: TIMED_WAITING
  Blocked count: 25
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2)):
  State: TIMED_WAITING
  Blocked count: 22
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4)):
  State: TIMED_WAITING
  Blocked count: 30
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2/current/BP-130630477-172.17.0.2-1734075176645):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4/current/BP-130630477-172.17.0.2-1734075176645):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3/current/BP-130630477-172.17.0.2-1734075176645):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1/current/BP-130630477-172.17.0.2-1734075176645):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 212 (pool-15-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@7b401e04[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@6beadbe9[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@37ebc9e3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59810): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 158 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: 
WAITING Blocked count: 26 Waited count: 678 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cd27adb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:59810):): State: WAITING Blocked count: 4 Waited count: 800 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31612979 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 831 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73787a64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a39528e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 311 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:59810)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 21 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22306f2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2231d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 132 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@606fbe7d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713): State: WAITING Blocked count: 201 Waited count: 728 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ee87f35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713): State: WAITING Blocked count: 103 Waited count: 420 Waiting on java.util.concurrent.Semaphore$NonfairSync@f801cfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713): State: WAITING Blocked count: 59 Waited count: 6201 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e3d3841 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 
(RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33ce1ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33ce1ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ec02728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6128a9f9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@16a06aad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7275d05d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 60 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;9eb75de94ef3:33713): State: TIMED_WAITING Blocked count: 6 Waited count: 2510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$951/0x00007eff68eeaa28.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@5336ad43): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3101 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 399 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 72 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 31 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 30933 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 448 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c3da073 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 480 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65fbfbaa 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ec37b94 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50d7b07b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 502 (LeaseRenewer:jenkins.hfs.0@localhost:41477): State: TIMED_WAITING Blocked count: 8 Waited count: 322 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 516 (LeaseRenewer:jenkins.hfs.1@localhost:41477): State: TIMED_WAITING Blocked count: 8 Waited count: 323 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 517 (LeaseRenewer:jenkins.hfs.2@localhost:41477): State: TIMED_WAITING Blocked count: 8 Waited count: 324 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (region-location-0): State: WAITING Blocked count: 10 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 30703 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 591 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 592 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 371 Waiting on java.util.concurrent.ForkJoinPool@4bd42f02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 593 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 541 Waiting on java.util.concurrent.ForkJoinPool@4bd42f02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 617 (region-location-1): State: WAITING Blocked count: 3 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 618 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 619 (region-location-3): State: WAITING Blocked count: 1 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1018 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 354 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1109 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1120 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 64 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7327ddf7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1180 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1181 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1534 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@473a8655 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3271 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5078 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5079 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5080 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9063 (AsyncFSWAL-1-hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData-prefix:9eb75de94ef3,33713,1734075181583): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d7c1d8f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9068 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-13T07:38:30,633 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-13T07:39:00,634 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9eb75de94ef3:33713 220 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack:
java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@182ecfbe Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32a5bb4b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3789 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 38 Waiting on java.util.concurrent.CountDownLatch$Sync@91f785c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13695 Waited count: 14180 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) 
app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 8 Waited count: 9 Waiting on java.lang.ref.ReferenceQueue$Lock@2c6d5259 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@330dcb7c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@258b3810): State: TIMED_WAITING Blocked count: 1 Waited count: 752 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 76 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 
(qtp2027007014-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2027007014-38): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2027007014-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2027007014-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2027007014-41-acceptor-0@4635fff0-ServerConnector@1bde3171{HTTP/1.1, (http/1.1)}{localhost:44519}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2027007014-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2027007014-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2027007014-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-52bd7ad0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 
(FSEditLogAsync): State: WAITING Blocked count: 35 Waited count: 2919 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d37a293 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 41477): State: TIMED_WAITING Blocked count: 1 Waited count: 39 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@bf12a04): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@28e374cd): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 37051 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1246 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@176f9e6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 41477): State: TIMED_WAITING Blocked count: 110 Waited count: 2203 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 41477): State: TIMED_WAITING Blocked count: 119 Waited count: 2175 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 41477): State: TIMED_WAITING Blocked count: 148 Waited count: 2193 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 41477): State: TIMED_WAITING Blocked count: 106 Waited count: 2172 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 41477): State: TIMED_WAITING Blocked count: 136 Waited count: 2197 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@ab0fb39): State: TIMED_WAITING Blocked count: 0 Waited count: 188 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5976018a): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@3aa58b17): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@13ac15a7): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1867710578)): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1363635181-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1363635181-88-acceptor-0@4c0c4250-ServerConnector@440e205c{HTTP/1.1, (http/1.1)}{localhost:36637}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1363635181-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1363635181-90): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-4dd6ff29-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@68ecf444): State: TIMED_WAITING Blocked count: 0 Waited count: 749 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38399): State: TIMED_WAITING Blocked count: 1 Waited count: 39 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 301 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2113aed7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1320 Waited count: 1404 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@402b75c8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 380 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 394 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 387 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 385 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 375 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 113 (IPC Client (1658146201) connection to localhost/127.0.0.1:41477 from jenkins): State: TIMED_WAITING Blocked count: 1099 Waited count: 1100 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 0 Waited count: 1862 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp751009864-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp751009864-122-acceptor-0@170e356c-ServerConnector@42d696f2{HTTP/1.1, (http/1.1)}{localhost:33899}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp751009864-123): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 124 (qtp751009864-124): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4c16e68d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8727970): State: TIMED_WAITING Blocked count: 0 Waited count: 749 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle 
connection scanner for port 38381): State: TIMED_WAITING Blocked count: 1 Waited count: 39 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 313 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60cbaf0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1280 Waited count: 1390 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@270b462c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 395 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 426 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 405 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 414 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1966195873-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1966195873-154-acceptor-0@3e80f22e-ServerConnector@7a125539{HTTP/1.1, (http/1.1)}{localhost:36975}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1966195873-155): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1966195873-156): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-4ca4f3ae-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@35dfc49): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41463): State: TIMED_WAITING Blocked count: 1 Waited count: 39 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 2 Waited count: 304 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b9b828b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1247 Waited count: 1383 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@13cf1202): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 410 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 377 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 401 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41463): State: TIMED_WAITING Blocked count: 0 
Waited count: 378 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3)): State: TIMED_WAITING Blocked count: 34 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@7b401e04[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@6beadbe9[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@37ebc9e3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59810): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 188 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 683 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cd27adb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:59810):): State: WAITING Blocked count: 4 Waited count: 805 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31612979 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 836 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73787a64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a39528e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 361 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:59810)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 21 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22306f2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2231d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 133 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 
(NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@606fbe7d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713): State: WAITING Blocked count: 201 Waited count: 728 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ee87f35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713): State: WAITING Blocked count: 103 Waited count: 420 Waiting on java.util.concurrent.Semaphore$NonfairSync@f801cfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713): State: WAITING Blocked count: 59 Waited count: 6201 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e3d3841 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33ce1ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33ce1ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ec02728 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6128a9f9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@16a06aad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7275d05d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 60 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;9eb75de94ef3:33713): State: TIMED_WAITING Blocked count: 6 Waited count: 2510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$951/0x00007eff68eeaa28.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@5336ad43): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3701 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 399 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 72 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@706b6f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 37 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 36934 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 448 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c3da073 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 480 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65fbfbaa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ec37b94 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 
(regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50d7b07b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 532 (region-location-0): State: WAITING Blocked count: 10 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 36704 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 592 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 372 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 593 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 541 Waiting on java.util.concurrent.ForkJoinPool@4bd42f02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 617 (region-location-1): State: WAITING Blocked count: 3 Waited count: 10 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 618 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 619 (region-location-3): State: WAITING Blocked count: 1 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1018 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 360 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1109 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1120 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 64 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7327ddf7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1180 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1181 
(RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1534 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@473a8655 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3271 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5078 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5079 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5080 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9063 (AsyncFSWAL-1-hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData-prefix:9eb75de94ef3,33713,1734075181583): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d7c1d8f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9068 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-13T07:39:30,634 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-13T07:40:00,634 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9eb75de94ef3:33713 219 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@182ecfbe Stack: java.base@17.0.11/java.lang.Object.wait(Native Method)
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32a5bb4b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4389 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 44 Waiting on java.util.concurrent.CountDownLatch$Sync@1a844f04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13695 Waited count: 14181 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 8 Waited count: 9 Waiting on java.lang.ref.ReferenceQueue$Lock@2c6d5259 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@330dcb7c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@258b3810): State: TIMED_WAITING Blocked count: 1 Waited count: 872 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 88 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2027007014-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2027007014-38): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2027007014-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2027007014-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2027007014-41-acceptor-0@4635fff0-ServerConnector@1bde3171{HTTP/1.1, (http/1.1)}{localhost:44519}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2027007014-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2027007014-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2027007014-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-52bd7ad0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 35 Waited count: 2919 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d37a293 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 41477): State: TIMED_WAITING Blocked count: 1 Waited count: 45 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@bf12a04): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@28e374cd): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 42977 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1246 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@176f9e6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 41477): State: TIMED_WAITING Blocked count: 110 Waited count: 2263 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 41477): State: TIMED_WAITING Blocked count: 119 Waited count: 2235 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 41477): State: TIMED_WAITING Blocked count: 151 Waited count: 2254 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 41477): State: TIMED_WAITING Blocked count: 106 Waited count: 2232 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 41477): State: TIMED_WAITING Blocked count: 139 Waited count: 2257 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 
(pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@ab0fb39): State: TIMED_WAITING Blocked count: 0 Waited count: 218 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5976018a): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@3aa58b17): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@13ac15a7): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1867710578)): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1363635181-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1363635181-88-acceptor-0@4c0c4250-ServerConnector@440e205c{HTTP/1.1, (http/1.1)}{localhost:36637}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1363635181-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1363635181-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-4dd6ff29-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@68ecf444): State: TIMED_WAITING Blocked count: 0 Waited count: 869 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38399): State: TIMED_WAITING Blocked count: 1 Waited count: 45 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 321 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2113aed7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1341 Waited count: 1444 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@402b75c8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 440 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 454 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 435 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 113 (IPC Client (1658146201) connection to localhost/127.0.0.1:41477 from jenkins): State: TIMED_WAITING Blocked count: 1152 Waited count: 1153 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 0 Waited count: 1916 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp751009864-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp751009864-122-acceptor-0@170e356c-ServerConnector@42d696f2{HTTP/1.1, (http/1.1)}{localhost:33899}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp751009864-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp751009864-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4c16e68d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8727970): State: TIMED_WAITING Blocked count: 0 Waited count: 869 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 38381): State: TIMED_WAITING Blocked count: 1 Waited count: 45 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 333 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60cbaf0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1300 Waited count: 1436 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@270b462c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 455 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 486 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1966195873-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1966195873-154-acceptor-0@3e80f22e-ServerConnector@7a125539{HTTP/1.1, (http/1.1)}{localhost:36975}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1966195873-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1966195873-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-4ca4f3ae-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@35dfc49): State: TIMED_WAITING Blocked count: 0 Waited count: 868 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41463): State: TIMED_WAITING Blocked count: 1 Waited count: 45 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 2 Waited count: 324 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b9b828b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1267 Waited count: 1423 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@13cf1202): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 478 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 490 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 438 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3)): State: TIMED_WAITING Blocked count: 34 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@7b401e04[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@6beadbe9[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@37ebc9e3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59810): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 218 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 687 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cd27adb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:59810):): State: WAITING Blocked count: 4 Waited count: 809 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31612979 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 840 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73787a64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 134 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a39528e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 411 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:59810)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 21 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22306f2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 
(NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2231d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@606fbe7d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713): State: WAITING Blocked count: 201 Waited count: 728 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ee87f35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713): State: WAITING Blocked count: 103 Waited count: 420 Waiting on java.util.concurrent.Semaphore$NonfairSync@f801cfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713): State: WAITING Blocked count: 59 Waited count: 6201 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e3d3841 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33ce1ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33ce1ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ec02728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6128a9f9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@16a06aad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7275d05d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
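The per-thread fields in these dump entries (State, Blocked count, Waited count, the monitor being waited on, and a stack) are the standard fields exposed by the JDK's java.lang.management ThreadMXBean/ThreadInfo API, which is what a periodic dumper producing output in this shape would typically query. As a minimal, self-contained sketch of how such a report can be generated — the class and method names below (ThreadDumpSketch, dump) are illustrative only and are not taken from the HBase code that emitted this log:

// Hedged sketch: print every live thread with its state, blocked/waited
// counts, the lock it is waiting on, and its stack frames, roughly in the
// format seen in the dump entries above. Uses only java.lang.management.
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public final class ThreadDumpSketch {
  public static void dump() {
    ThreadMXBean bean = ManagementFactory.getThreadMXBean();
    System.out.println(bean.getThreadCount() + " active threads");
    // Request up to 100 stack frames per thread; threads that terminated
    // between getAllThreadIds() and getThreadInfo() come back as null.
    ThreadInfo[] infos = bean.getThreadInfo(bean.getAllThreadIds(), 100);
    for (ThreadInfo info : infos) {
      if (info == null) {
        continue;
      }
      System.out.println("Thread " + info.getThreadId() + " (" + info.getThreadName() + "):");
      System.out.println("  State: " + info.getThreadState());
      System.out.println("  Blocked count: " + info.getBlockedCount());
      System.out.println("  Waited count: " + info.getWaitedCount());
      if (info.getLockName() != null) {
        System.out.println("  Waiting on " + info.getLockName());
      }
      System.out.println("  Stack:");
      for (StackTraceElement frame : info.getStackTrace()) {
        System.out.println("    " + frame);
      }
    }
  }

  public static void main(String[] args) {
    dump();
  }
}

Invoking a call like ThreadDumpSketch.dump() from a fixed 60-second timer would reproduce the "Automatic Stack Trace every 60 seconds" cadence stated in the dump headers of this log; the scheduling wrapper here is assumed, not shown.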
Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 60 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;9eb75de94ef3:33713): State: TIMED_WAITING Blocked count: 6 Waited count: 2510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$951/0x00007eff68eeaa28.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) 
app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@5336ad43): State: TIMED_WAITING Blocked count: 0 Waited count: 144 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4300 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 399 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 72 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@706b6f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 43 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 42935 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 448 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c3da073 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 480 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65fbfbaa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ec37b94 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50d7b07b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 532 (region-location-0): State: WAITING Blocked count: 10 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 42705 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 593 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 542 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 617 (region-location-1): State: WAITING Blocked count: 3 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 618 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 619 (region-location-3): State: WAITING Blocked count: 1 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1018 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 366 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1109 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1120 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 64 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7327ddf7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1180 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1181 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1534 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@473a8655 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3271 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5078 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5079 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5080 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9063 (AsyncFSWAL-1-hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData-prefix:9eb75de94ef3,33713,1734075181583): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d7c1d8f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9068 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-13T07:40:30,635 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your 
Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T07:41:00,635 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9eb75de94ef3:33713 218 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 16 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@182ecfbe Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32a5bb4b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4988 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 50 Waiting on java.util.concurrent.CountDownLatch$Sync@5759b9be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13695 Waited count: 14182 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@2c6d5259 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@330dcb7c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@258b3810): State: TIMED_WAITING Blocked count: 1 Waited count: 992 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2027007014-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2027007014-38): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2027007014-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2027007014-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2027007014-41-acceptor-0@4635fff0-ServerConnector@1bde3171{HTTP/1.1, (http/1.1)}{localhost:44519}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2027007014-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2027007014-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2027007014-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-52bd7ad0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 35 Waited count: 2919 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d37a293 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 41477): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@bf12a04): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 166 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@28e374cd): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 166 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 48901 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1246 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@176f9e6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 41477): State: TIMED_WAITING Blocked count: 110 Waited count: 2323 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 41477): State: TIMED_WAITING Blocked count: 119 Waited count: 2295 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 41477): State: TIMED_WAITING Blocked count: 151 Waited count: 2314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 41477): State: TIMED_WAITING Blocked count: 106 Waited count: 2292 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 41477): State: TIMED_WAITING Blocked count: 139 Waited count: 2317 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@ab0fb39): State: TIMED_WAITING Blocked count: 0 Waited count: 248 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5976018a): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@3aa58b17): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@13ac15a7): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1867710578)): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1363635181-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1363635181-88-acceptor-0@4c0c4250-ServerConnector@440e205c{HTTP/1.1, (http/1.1)}{localhost:36637}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1363635181-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1363635181-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-4dd6ff29-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@68ecf444): State: TIMED_WAITING Blocked count: 0 Waited count: 989 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38399): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 341 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2113aed7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1361 Waited count: 1484 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@402b75c8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 500 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 505 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 495 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 113 (IPC Client (1658146201) connection to localhost/127.0.0.1:41477 from jenkins): State: TIMED_WAITING Blocked count: 1212 Waited count: 1213 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 0 Waited count: 1976 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp751009864-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp751009864-122-acceptor-0@170e356c-ServerConnector@42d696f2{HTTP/1.1, (http/1.1)}{localhost:33899}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp751009864-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp751009864-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4c16e68d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8727970): State: TIMED_WAITING Blocked count: 0 Waited count: 989 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 38381): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 353 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60cbaf0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1320 Waited count: 1476 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@270b462c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 528 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 540 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 554 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1966195873-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1966195873-154-acceptor-0@3e80f22e-ServerConnector@7a125539{HTTP/1.1, (http/1.1)}{localhost:36975}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1966195873-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1966195873-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-4ca4f3ae-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@35dfc49): State: TIMED_WAITING Blocked count: 0 Waited count: 988 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41463): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 2 Waited count: 344 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b9b828b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1287 Waited count: 1463 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@13cf1202): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 540 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC 
Server handler 1 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 497 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 551 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 521 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3)): State: TIMED_WAITING Blocked count: 34 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@7b401e04[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@6beadbe9[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@37ebc9e3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59810): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 248 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 691 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cd27adb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:59810):): State: WAITING Blocked count: 4 Waited count: 813 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31612979 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 844 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73787a64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a39528e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:59810)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 21 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22306f2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2231d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@606fbe7d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713): State: WAITING Blocked count: 201 Waited count: 728 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ee87f35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713): State: WAITING Blocked count: 103 Waited count: 420 Waiting on java.util.concurrent.Semaphore$NonfairSync@f801cfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713): State: WAITING Blocked count: 59 Waited count: 6201 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e3d3841 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33ce1ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33ce1ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ec02728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6128a9f9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@16a06aad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7275d05d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: 
RUNNABLE Blocked count: 60 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;9eb75de94ef3:33713): State: TIMED_WAITING Blocked count: 6 Waited count: 2510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$951/0x00007eff68eeaa28.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@5336ad43): State: TIMED_WAITING Blocked count: 0 Waited count: 164 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4899 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 399 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 72 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@706b6f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 49 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 48936 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 448 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c3da073 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 480 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65fbfbaa 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ec37b94 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50d7b07b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 532 (region-location-0): State: WAITING Blocked count: 10 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 48707 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 617 (region-location-1): State: WAITING Blocked count: 3 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 618 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 619 (region-location-3): State: WAITING Blocked count: 1 Waited count: 6 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1018 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 372 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1109 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1120 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 64 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7327ddf7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1180 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1181 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1534 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@473a8655 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3271 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5078 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5079 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5080 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9063 
(AsyncFSWAL-1-hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData-prefix:9eb75de94ef3,33713,1734075181583): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d7c1d8f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9068 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-13T07:41:30,635 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T07:42:00,636 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T07:42:13,257 DEBUG [M:0;9eb75de94ef3:33713 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T07:42:13,258 WARN [M:0;9eb75de94ef3:33713 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:883) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) ~[classes/:?] ... 20 more 2024-12-13T07:42:13,261 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(163): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:396) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:243) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:201) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:236) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:160) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
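The failure recorded above is a deadline expiring while region close blocks on an outstanding WAL sync (SyncFuture.get -> AbstractFSWAL.blockOnSync), which is then rethrown as WALSyncTimeoutIOException. As an illustrative analogue only -- plain java.util.concurrent, not the real HBase SyncFuture/AbstractFSWAL classes -- a close path that waits on an asynchronous sync with a fixed budget and converts the expiry into an IOException looks roughly like this:

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class SyncOnCloseSketch {
    // Hypothetical stand-in for the WAL's pending sync; not the real SyncFuture.
    private final CompletableFuture<Long> pendingSync = new CompletableFuture<>();

    /** Blocks until the sync completes or the budget (ms) expires. */
    long blockOnSync(long txid, long timeoutMs) throws IOException {
        try {
            // Wait for the sync result, giving up once the timeout elapses.
            return pendingSync.get(timeoutMs, TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
            // Mirrors the "Failed to get sync result after N ms ... WAL system stuck?" report above.
            throw new IOException("Failed to get sync result after " + timeoutMs
                + " ms for txid=" + txid + ", WAL system stuck?", e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException("Interrupted while waiting for sync, txid=" + txid, e);
        } catch (ExecutionException e) {
            throw new IOException("Sync failed for txid=" + txid, e.getCause());
        }
    }
}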
2024-12-13T07:42:13,316 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-13T07:42:13,316 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-13T07:42:13,316 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/WALs/9eb75de94ef3,33713,1734075181583/9eb75de94ef3%2C33713%2C1734075181583.1734075182994 2024-12-13T07:42:13,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/WALs/9eb75de94ef3,33713,1734075181583/9eb75de94ef3%2C33713%2C1734075181583.1734075182994 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T07:42:13,318 WARN [Close-WAL-Writer-0 {}] wal.AsyncFSWAL(734): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T07:42:13,319 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/WALs/9eb75de94ef3,33713,1734075181583/9eb75de94ef3%2C33713%2C1734075181583.1734075182994 2024-12-13T07:42:13,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/WALs/9eb75de94ef3,33713,1734075181583/9eb75de94ef3%2C33713%2C1734075181583.1734075182994 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
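The two lease-recovery attempts above fail immediately because the test's DFS client has already been shut down ("Filesystem closed"), and the retry is then cancelled while sleeping ("Operation cancelled"). A rough sketch of that retry shape, assuming a recoverLease hook that returns true once the lease is released (the names below are illustrative, not the RecoverLeaseFSUtils internals):

import java.io.IOException;
import java.io.InterruptedIOException;

public class LeaseRecoverySketch {
    /** Hypothetical hook: one recoverLease attempt; true means the lease is already released. */
    interface LeaseRecoverer {
        boolean recoverLease() throws IOException;
    }

    /** Retries recoverLease with a pause between attempts until success or the deadline passes. */
    static boolean recoverWithRetries(LeaseRecoverer fs, long timeoutMs, long pauseMs)
            throws IOException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        for (int attempt = 0; ; attempt++) {
            try {
                if (fs.recoverLease()) {
                    return true;              // lease released, safe to proceed
                }
            } catch (IOException e) {
                // e.g. "Filesystem closed": record the attempt and keep trying until the deadline.
                System.err.println("attempt=" + attempt + " failed: " + e.getMessage());
            }
            if (System.currentTimeMillis() >= deadline) {
                return false;                 // give up; the caller decides what to do next
            }
            try {
                Thread.sleep(pauseMs);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new InterruptedIOException("Operation cancelled");
            }
        }
    }
}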
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9eb75de94ef3:33713 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 16 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@182ecfbe Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32a5bb4b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 57 Waiting on java.util.concurrent.CountDownLatch$Sync@453bf6ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13695 Waited count: 14183 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@2c6d5259 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@330dcb7c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@258b3810): State: TIMED_WAITING Blocked count: 1 Waited count: 1112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2027007014-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2027007014-38): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2027007014-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2027007014-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2027007014-41-acceptor-0@4635fff0-ServerConnector@1bde3171{HTTP/1.1, (http/1.1)}{localhost:44519}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2027007014-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2027007014-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2027007014-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-52bd7ad0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 35 Waited count: 2919 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d37a293 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 41477): State: TIMED_WAITING Blocked count: 1 Waited 
count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@bf12a04): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 186 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@28e374cd): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 186 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 54826 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1246 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@176f9e6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 41477): State: TIMED_WAITING Blocked count: 110 Waited count: 2383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 41477): State: TIMED_WAITING Blocked count: 119 Waited count: 2355 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 41477): State: TIMED_WAITING Blocked count: 151 Waited count: 2374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 41477): State: TIMED_WAITING Blocked count: 106 Waited count: 2352 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 41477): State: TIMED_WAITING Blocked count: 139 Waited count: 2377 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@ab0fb39): State: TIMED_WAITING Blocked count: 0 Waited count: 278 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5976018a): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@3aa58b17): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@13ac15a7): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1867710578)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1363635181-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1363635181-88-acceptor-0@4c0c4250-ServerConnector@440e205c{HTTP/1.1, (http/1.1)}{localhost:36637}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1363635181-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1363635181-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-4dd6ff29-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@68ecf444): State: TIMED_WAITING Blocked count: 0 Waited count: 1109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38399): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 361 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2113aed7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1381 Waited count: 1524 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@402b75c8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 567 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38399): State: TIMED_WAITING Blocked count: 0 Waited count: 555 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 113 (IPC Client (1658146201) connection to localhost/127.0.0.1:41477 from jenkins): State: TIMED_WAITING Blocked count: 1272 Waited count: 1273 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 114 (IPC Parameter Sending Thread for localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 0 Waited count: 2036 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp751009864-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 
(qtp751009864-122-acceptor-0@170e356c-ServerConnector@42d696f2{HTTP/1.1, (http/1.1)}{localhost:33899}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp751009864-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp751009864-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4c16e68d-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8727970): State: TIMED_WAITING Blocked count: 0 Waited count: 1109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 38381): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 373 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60cbaf0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1340 Waited count: 1516 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@270b462c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 575 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 612 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 602 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 38381): State: TIMED_WAITING Blocked count: 0 Waited count: 615 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1966195873-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007eff68428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1966195873-154-acceptor-0@3e80f22e-ServerConnector@7a125539{HTTP/1.1, (http/1.1)}{localhost:36975}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1966195873-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1966195873-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-4ca4f3ae-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@35dfc49): State: TIMED_WAITING Blocked count: 0 Waited count: 1108 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41463): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 2 Waited count: 364 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b9b828b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477): State: TIMED_WAITING Blocked count: 1307 Waited count: 1503 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@13cf1202): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 605 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 581 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41463): State: TIMED_WAITING Blocked count: 0 Waited count: 558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3)): State: TIMED_WAITING Blocked count: 34 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 
(pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@7b401e04[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@6beadbe9[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6/current/BP-130630477-172.17.0.2-1734075176645): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (java.util.concurrent.ThreadPoolExecutor$Worker@37ebc9e3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 
(FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59810): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 278 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 696 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cd27adb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:59810):): State: WAITING Blocked count: 4 Waited count: 818 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31612979 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 849 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73787a64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a39528e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:59810)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 21 Waited count: 55 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22306f2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2231d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 135 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 
(NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35a2e243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@606fbe7d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33713): State: WAITING Blocked count: 201 Waited count: 728 Waiting on java.util.concurrent.Semaphore$NonfairSync@2ee87f35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33713): State: WAITING Blocked count: 103 Waited count: 420 Waiting on java.util.concurrent.Semaphore$NonfairSync@f801cfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33713): State: WAITING Blocked count: 59 Waited count: 6201 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e3d3841 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33ce1ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33ce1ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ec02728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6128a9f9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@16a06aad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33713): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7275d05d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 60 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;9eb75de94ef3:33713): State: TIMED_WAITING Blocked count: 6 Waited count: 2511 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1011) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown(AbstractFSWALProvider.java:184) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:272) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/9eb75de94ef3:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@5336ad43): State: TIMED_WAITING Blocked count: 0 Waited count: 184 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5499 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 399 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 72 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@706b6f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 54938 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 
(RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 448 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c3da073 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 480 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65fbfbaa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ec37b94 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/9eb75de94ef3:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50d7b07b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 532 (region-location-0): State: WAITING Blocked count: 10 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 54708 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 617 (region-location-1): State: WAITING Blocked count: 3 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 618 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 619 (region-location-3): State: WAITING Blocked count: 1 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1018 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 378 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1109 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1120 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 64 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7327ddf7 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1180 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1181 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1534 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@473a8655 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3271 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7148bc2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5078 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5079 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5080 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9063 (AsyncFSWAL-1-hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData-prefix:9eb75de94ef3,33713,1734075181583): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d7c1d8f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9068 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 25
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 9069 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9072 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doShutdown(AsyncFSWAL.java:793)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:995)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:990)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9073 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL$$Lambda$1113/0x00007eff6911bbe0.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-13T07:42:17,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/WALs/9eb75de94ef3,33713,1734075181583/9eb75de94ef3%2C33713%2C1734075181583.1734075182994 after 4001ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T07:42:18,261 ERROR [WAL-Shutdown-0 {}] wal.AsyncFSWAL(794): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-12-13T07:42:18,262 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-13T07:42:18,262 INFO [M:0;9eb75de94ef3:33713 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-13T07:42:18,262 INFO [M:0;9eb75de94ef3:33713 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33713
2024-12-13T07:42:18,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41477/user/jenkins/test-data/e1502fd6-e220-c100-04f2-f8aba4f26534/MasterData/WALs/9eb75de94ef3,33713,1734075181583/9eb75de94ef3%2C33713%2C1734075181583.1734075182994
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-12-13T07:42:18,361 DEBUG [M:0;9eb75de94ef3:33713 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/9eb75de94ef3,33713,1734075181583 already deleted, retry=false
2024-12-13T07:42:18,474 INFO [M:0;9eb75de94ef3:33713 {}] regionserver.HRegionServer(1307): Exiting; stopping=9eb75de94ef3,33713,1734075181583; zookeeper connection closed.
2024-12-13T07:42:18,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T07:42:18,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33713-0x1001e4e83250000, quorum=127.0.0.1:59810, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T07:42:18,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@721cee68{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T07:42:18,571 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a125539{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T07:42:18,571 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T07:42:18,571 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e0ba457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T07:42:18,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57eb71ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,STOPPED}
2024-12-13T07:42:18,575 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T07:42:18,575 WARN [BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T07:42:18,575 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T07:42:18,575 WARN [BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-130630477-172.17.0.2-1734075176645 (Datanode Uuid c2e5d365-f7c3-4c18-8e6d-2355967b2d88) service to localhost/127.0.0.1:41477
2024-12-13T07:42:18,578 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data5/current/BP-130630477-172.17.0.2-1734075176645 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T07:42:18,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data6/current/BP-130630477-172.17.0.2-1734075176645 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T07:42:18,579 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T07:42:18,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d2dc9a9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T07:42:18,582 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42d696f2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T07:42:18,582 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T07:42:18,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4208f97a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T07:42:18,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7467d7f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,STOPPED}
2024-12-13T07:42:18,583 WARN [BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T07:42:18,583 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T07:42:18,583 WARN [BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-130630477-172.17.0.2-1734075176645 (Datanode Uuid dd92cc55-e12e-44ba-9ba8-99aa86645a78) service to localhost/127.0.0.1:41477
2024-12-13T07:42:18,583 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T07:42:18,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data3/current/BP-130630477-172.17.0.2-1734075176645 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T07:42:18,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data4/current/BP-130630477-172.17.0.2-1734075176645 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T07:42:18,584 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T07:42:18,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d992105{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T07:42:18,587 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@440e205c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T07:42:18,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T07:42:18,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1815b75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T07:42:18,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ffbec59{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,STOPPED}
2024-12-13T07:42:18,588 WARN [BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T07:42:18,588 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T07:42:18,588 WARN [BP-130630477-172.17.0.2-1734075176645 heartbeating to localhost/127.0.0.1:41477 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-130630477-172.17.0.2-1734075176645 (Datanode Uuid 71923675-b3b8-4b71-9f3a-008f5bf139fb) service to localhost/127.0.0.1:41477
2024-12-13T07:42:18,588 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T07:42:18,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data1/current/BP-130630477-172.17.0.2-1734075176645 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T07:42:18,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/cluster_bf6de45b-9795-6374-d322-fedb62c59725/dfs/data/data2/current/BP-130630477-172.17.0.2-1734075176645 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T07:42:18,589 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T07:42:18,597 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59f3fe3e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-13T07:42:18,597 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1bde3171{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T07:42:18,598 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T07:42:18,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@432ebcaa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T07:42:18,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f8ccbbc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5203e9be-dc3e-0990-1e04-c8e779b91a61/hadoop.log.dir/,STOPPED}
2024-12-13T07:42:18,607 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-13T07:42:18,823 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down